[clang] a83eda5 - [RISCV][NFC] Deleted useless lines in clang tests.

Shao-Ce SUN via cfe-commits <cfe-commits at lists.llvm.org>
Tue Sep 21 19:26:23 PDT 2021


Author: Shao-Ce SUN
Date: 2021-09-22T10:25:57+08:00
New Revision: a83eda591c1032e24100f9bf551db9bc7a303f97

URL: https://github.com/llvm/llvm-project/commit/a83eda591c1032e24100f9bf551db9bc7a303f97
DIFF: https://github.com/llvm/llvm-project/commit/a83eda591c1032e24100f9bf551db9bc7a303f97.diff

LOG: [RISCV][NFC] Deleted useless lines in clang tests.
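    Each of these files is an autogenerated RVV intrinsic test in which every
    CHECK block was preceded by a lone "//" separator comment carrying no
    information; only that separator line is removed, while the RUN lines and
    CHECK assertions themselves are untouched. A minimal sketch of one affected
    test after the change (RUN line and trailing CHECK lines elided, assuming
    the usual update_cc_test_checks.py-generated layout of these files):

        #include <riscv_vector.h>

        // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf8(
        // CHECK-RV64-NEXT:  entry:
        // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
        vint8mf8_t test_vaadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
          return vaadd_vv_i8mf8(op1, op2, vl);
        }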

Added: 
    

Modified: 
    clang/test/CodeGen/RISCV/rvv-intrinsics/vaadd.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vadc.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vadd.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vamoadd.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vamoand.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vamomax.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vamomin.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vamoor.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vamoswap.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vamoxor.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vand.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vasub.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vdiv.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfabs.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfadd.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfclass.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfcvt.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfdiv.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfirst.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfmacc.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfmadd.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfmax.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfmerge.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfmin.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsac.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsub.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfmul.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfmv.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfncvt.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfneg.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmacc.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmadd.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsac.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsub.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfrdiv.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfrec7.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmax.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmin.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfredsum.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsqrt7.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsub.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfsgnj.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1down.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1up.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfsqrt.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfsub.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfwadd.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfwcvt.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmacc.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmsac.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmul.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmacc.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmsac.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredosum.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredsum.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfwsub.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vid.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/viota.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vleff.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vlmul.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vloxei.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vlse.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vluxei.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmacc.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmadc.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmadd.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmand.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmax.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmclr.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmerge.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmfeq.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmfge.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmfgt.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmfle.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmflt.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmfne.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmin.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmmv.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmnand.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmnor.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmnot.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmor.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmsbc.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmsbf.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmseq.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmset.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmsge.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmsgt.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmsif.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmsle.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmslt.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmsne.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmsof.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmul.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmv.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmxnor.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmxor.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vnclip.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vncvt.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vneg.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vnmsac.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vnmsub.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vnot.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vnsra.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vnsrl.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vor.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vpopc.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vredand.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vredmax.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vredmin.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vredor.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vredsum.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vredxor.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vreinterpret.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vrem.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vrgather.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vrsub.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vsadd.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vsbc.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vsetvl.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vsetvlmax.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vsext.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1down.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1up.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vslidedown.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vslideup.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vsll.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vsoxei.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vsra.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vsrl.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vsse.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vssra.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vssrl.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vssub.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vsub.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vsuxei.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vundefined.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vwadd.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vwcvt.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vwmacc.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vwmul.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vwredsum.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vwsub.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vxor.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vzext.c

Removed: 
    


################################################################################
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vaadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vaadd.c
index 580ec0c4e07c5..d56e97dfd1022 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vaadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vaadd.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint8mf8_t test_vaadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
   return vaadd_vv_i8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint8mf8_t test_vaadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
   return vaadd_vx_i8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint8mf4_t test_vaadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
   return vaadd_vv_i8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint8mf4_t test_vaadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
   return vaadd_vx_i8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint8mf2_t test_vaadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
   return vaadd_vv_i8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaadd.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint8mf2_t test_vaadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
   return vaadd_vx_i8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vint8m1_t test_vaadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
   return vaadd_vv_i8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaadd.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vint8m1_t test_vaadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
   return vaadd_vx_i8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vint8m2_t test_vaadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
   return vaadd_vv_i8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaadd.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vint8m2_t test_vaadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
   return vaadd_vx_i8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vint8m4_t test_vaadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
   return vaadd_vv_i8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaadd.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vint8m4_t test_vaadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
   return vaadd_vx_i8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vint8m8_t test_vaadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
   return vaadd_vv_i8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaadd.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vint8m8_t test_vaadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
   return vaadd_vx_i8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vint16mf4_t test_vaadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
   return vaadd_vv_i16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaadd.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vint16mf4_t test_vaadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
   return vaadd_vx_i16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vint16mf2_t test_vaadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
   return vaadd_vv_i16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaadd.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vint16mf2_t test_vaadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
   return vaadd_vx_i16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vint16m1_t test_vaadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
   return vaadd_vv_i16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaadd.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vint16m1_t test_vaadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
   return vaadd_vx_i16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vint16m2_t test_vaadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
   return vaadd_vv_i16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaadd.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vint16m2_t test_vaadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
   return vaadd_vx_i16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vint16m4_t test_vaadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
   return vaadd_vv_i16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaadd.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vint16m4_t test_vaadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
   return vaadd_vx_i16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vint16m8_t test_vaadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
   return vaadd_vv_i16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaadd.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vint16m8_t test_vaadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
   return vaadd_vx_i16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vint32mf2_t test_vaadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
   return vaadd_vv_i32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vint32mf2_t test_vaadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
   return vaadd_vx_i32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vint32m1_t test_vaadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
   return vaadd_vv_i32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaadd.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -304,7 +274,6 @@ vint32m1_t test_vaadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
   return vaadd_vx_i32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -314,7 +283,6 @@ vint32m2_t test_vaadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
   return vaadd_vv_i32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaadd.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -324,7 +292,6 @@ vint32m2_t test_vaadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
   return vaadd_vx_i32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -334,7 +301,6 @@ vint32m4_t test_vaadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
   return vaadd_vv_i32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaadd.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -344,7 +310,6 @@ vint32m4_t test_vaadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
   return vaadd_vx_i32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -354,7 +319,6 @@ vint32m8_t test_vaadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
   return vaadd_vv_i32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaadd.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -364,7 +328,6 @@ vint32m8_t test_vaadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
   return vaadd_vx_i32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -374,7 +337,6 @@ vint64m1_t test_vaadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
   return vaadd_vv_i64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -384,7 +346,6 @@ vint64m1_t test_vaadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
   return vaadd_vx_i64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -394,7 +355,6 @@ vint64m2_t test_vaadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
   return vaadd_vv_i64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaadd.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -404,7 +364,6 @@ vint64m2_t test_vaadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
   return vaadd_vx_i64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -414,7 +373,6 @@ vint64m4_t test_vaadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
   return vaadd_vv_i64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaadd.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -424,7 +382,6 @@ vint64m4_t test_vaadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
   return vaadd_vx_i64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -434,7 +391,6 @@ vint64m8_t test_vaadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
   return vaadd_vv_i64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaadd.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -444,7 +400,6 @@ vint64m8_t test_vaadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
   return vaadd_vx_i64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -454,7 +409,6 @@ vuint8mf8_t test_vaaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
   return vaaddu_vv_u8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -464,7 +418,6 @@ vuint8mf8_t test_vaaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
   return vaaddu_vx_u8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaaddu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -474,7 +427,6 @@ vuint8mf4_t test_vaaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
   return vaaddu_vv_u8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaaddu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -484,7 +436,6 @@ vuint8mf4_t test_vaaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
   return vaaddu_vx_u8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaaddu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -494,7 +445,6 @@ vuint8mf2_t test_vaaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
   return vaaddu_vv_u8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaaddu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -504,7 +454,6 @@ vuint8mf2_t test_vaaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
   return vaaddu_vx_u8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaaddu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -514,7 +463,6 @@ vuint8m1_t test_vaaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vaaddu_vv_u8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaaddu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -524,7 +472,6 @@ vuint8m1_t test_vaaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
   return vaaddu_vx_u8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaaddu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -534,7 +481,6 @@ vuint8m2_t test_vaaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vaaddu_vv_u8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaaddu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -544,7 +490,6 @@ vuint8m2_t test_vaaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
   return vaaddu_vx_u8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaaddu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -554,7 +499,6 @@ vuint8m4_t test_vaaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vaaddu_vv_u8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaaddu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -564,7 +508,6 @@ vuint8m4_t test_vaaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
   return vaaddu_vx_u8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaaddu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -574,7 +517,6 @@ vuint8m8_t test_vaaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
   return vaaddu_vv_u8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaaddu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -584,7 +526,6 @@ vuint8m8_t test_vaaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
   return vaaddu_vx_u8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaaddu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -595,7 +536,6 @@ vuint16mf4_t test_vaaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
   return vaaddu_vv_u16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaaddu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -605,7 +545,6 @@ vuint16mf4_t test_vaaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
   return vaaddu_vx_u16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaaddu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -616,7 +555,6 @@ vuint16mf2_t test_vaaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
   return vaaddu_vv_u16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaaddu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -626,7 +564,6 @@ vuint16mf2_t test_vaaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
   return vaaddu_vx_u16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaaddu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -636,7 +573,6 @@ vuint16m1_t test_vaaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
   return vaaddu_vv_u16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaaddu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -646,7 +582,6 @@ vuint16m1_t test_vaaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
   return vaaddu_vx_u16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaaddu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -656,7 +591,6 @@ vuint16m2_t test_vaaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vaaddu_vv_u16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaaddu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -666,7 +600,6 @@ vuint16m2_t test_vaaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
   return vaaddu_vx_u16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaaddu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -676,7 +609,6 @@ vuint16m4_t test_vaaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vaaddu_vv_u16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaaddu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -686,7 +618,6 @@ vuint16m4_t test_vaaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
   return vaaddu_vx_u16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaaddu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -696,7 +627,6 @@ vuint16m8_t test_vaaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return vaaddu_vv_u16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaaddu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -706,7 +636,6 @@ vuint16m8_t test_vaaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
   return vaaddu_vx_u16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -717,7 +646,6 @@ vuint32mf2_t test_vaaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
   return vaaddu_vv_u32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -727,7 +655,6 @@ vuint32mf2_t test_vaaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
   return vaaddu_vx_u32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaaddu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -737,7 +664,6 @@ vuint32m1_t test_vaaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
   return vaaddu_vv_u32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaaddu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -747,7 +673,6 @@ vuint32m1_t test_vaaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
   return vaaddu_vx_u32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaaddu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -757,7 +682,6 @@ vuint32m2_t test_vaaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
   return vaaddu_vv_u32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaaddu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -767,7 +691,6 @@ vuint32m2_t test_vaaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vaaddu_vx_u32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaaddu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -777,7 +700,6 @@ vuint32m4_t test_vaaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vaaddu_vv_u32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaaddu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -787,7 +709,6 @@ vuint32m4_t test_vaaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vaaddu_vx_u32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaaddu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -797,7 +718,6 @@ vuint32m8_t test_vaaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
   return vaaddu_vv_u32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaaddu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -807,7 +727,6 @@ vuint32m8_t test_vaaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
   return vaaddu_vx_u32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -817,7 +736,6 @@ vuint64m1_t test_vaaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
   return vaaddu_vv_u64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -827,7 +745,6 @@ vuint64m1_t test_vaaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
   return vaaddu_vx_u64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaaddu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -837,7 +754,6 @@ vuint64m2_t test_vaaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
   return vaaddu_vv_u64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaaddu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -847,7 +763,6 @@ vuint64m2_t test_vaaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
   return vaaddu_vx_u64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaaddu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -857,7 +772,6 @@ vuint64m4_t test_vaaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
   return vaaddu_vv_u64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaaddu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -867,7 +781,6 @@ vuint64m4_t test_vaaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
   return vaaddu_vx_u64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaaddu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -877,7 +790,6 @@ vuint64m8_t test_vaaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
   return vaaddu_vv_u64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaaddu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -887,7 +799,6 @@ vuint64m8_t test_vaaddu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
   return vaaddu_vx_u64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -898,7 +809,6 @@ vint8mf8_t test_vaadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
   return vaadd_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -909,7 +819,6 @@ vint8mf8_t test_vaadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
   return vaadd_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaadd.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -920,7 +829,6 @@ vint8mf4_t test_vaadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
   return vaadd_vv_i8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaadd.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -931,7 +839,6 @@ vint8mf4_t test_vaadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
   return vaadd_vx_i8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaadd.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -942,7 +849,6 @@ vint8mf2_t test_vaadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
   return vaadd_vv_i8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaadd.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -953,7 +859,6 @@ vint8mf2_t test_vaadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
   return vaadd_vx_i8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaadd.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -964,7 +869,6 @@ vint8m1_t test_vaadd_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
   return vaadd_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaadd.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -975,7 +879,6 @@ vint8m1_t test_vaadd_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
   return vaadd_vx_i8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaadd.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -986,7 +889,6 @@ vint8m2_t test_vaadd_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
   return vaadd_vv_i8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaadd.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -997,7 +899,6 @@ vint8m2_t test_vaadd_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
   return vaadd_vx_i8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaadd.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1008,7 +909,6 @@ vint8m4_t test_vaadd_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
   return vaadd_vv_i8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaadd.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1019,7 +919,6 @@ vint8m4_t test_vaadd_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
   return vaadd_vx_i8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaadd.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1030,7 +929,6 @@ vint8m8_t test_vaadd_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
   return vaadd_vv_i8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaadd.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1041,7 +939,6 @@ vint8m8_t test_vaadd_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
   return vaadd_vx_i8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaadd.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1053,7 +950,6 @@ vint16mf4_t test_vaadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
   return vaadd_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaadd.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1064,7 +960,6 @@ vint16mf4_t test_vaadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
   return vaadd_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaadd.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1076,7 +971,6 @@ vint16mf2_t test_vaadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
   return vaadd_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaadd.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1087,7 +981,6 @@ vint16mf2_t test_vaadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
   return vaadd_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaadd.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1098,7 +991,6 @@ vint16m1_t test_vaadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
   return vaadd_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaadd.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1109,7 +1001,6 @@ vint16m1_t test_vaadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
   return vaadd_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaadd.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1120,7 +1011,6 @@ vint16m2_t test_vaadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
   return vaadd_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaadd.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1131,7 +1021,6 @@ vint16m2_t test_vaadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
   return vaadd_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaadd.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1142,7 +1031,6 @@ vint16m4_t test_vaadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
   return vaadd_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaadd.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1153,7 +1041,6 @@ vint16m4_t test_vaadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
   return vaadd_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaadd.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1164,7 +1051,6 @@ vint16m8_t test_vaadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
   return vaadd_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaadd.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1175,7 +1061,6 @@ vint16m8_t test_vaadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
   return vaadd_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1187,7 +1072,6 @@ vint32mf2_t test_vaadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
   return vaadd_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1198,7 +1082,6 @@ vint32mf2_t test_vaadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
   return vaadd_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaadd.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1209,7 +1092,6 @@ vint32m1_t test_vaadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
   return vaadd_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaadd.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1220,7 +1102,6 @@ vint32m1_t test_vaadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
   return vaadd_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaadd.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1231,7 +1112,6 @@ vint32m2_t test_vaadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
   return vaadd_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaadd.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1242,7 +1122,6 @@ vint32m2_t test_vaadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
   return vaadd_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaadd.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1253,7 +1132,6 @@ vint32m4_t test_vaadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
   return vaadd_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaadd.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1264,7 +1142,6 @@ vint32m4_t test_vaadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
   return vaadd_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1275,7 +1152,6 @@ vint32m8_t test_vaadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
   return vaadd_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaadd.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1286,7 +1162,6 @@ vint32m8_t test_vaadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
   return vaadd_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1297,7 +1172,6 @@ vint64m1_t test_vaadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
   return vaadd_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaadd.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1308,7 +1182,6 @@ vint64m1_t test_vaadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
   return vaadd_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaadd.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1319,7 +1192,6 @@ vint64m2_t test_vaadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
   return vaadd_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaadd.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1330,7 +1202,6 @@ vint64m2_t test_vaadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
   return vaadd_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaadd.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1341,7 +1212,6 @@ vint64m4_t test_vaadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
   return vaadd_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaadd.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1352,7 +1222,6 @@ vint64m4_t test_vaadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
   return vaadd_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1363,7 +1232,6 @@ vint64m8_t test_vaadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
   return vaadd_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaadd.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1374,7 +1242,6 @@ vint64m8_t test_vaadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
   return vaadd_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaaddu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1386,7 +1253,6 @@ vuint8mf8_t test_vaaddu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
   return vaaddu_vv_u8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaaddu.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1397,7 +1263,6 @@ vuint8mf8_t test_vaaddu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
   return vaaddu_vx_u8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaaddu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1409,7 +1274,6 @@ vuint8mf4_t test_vaaddu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
   return vaaddu_vv_u8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaaddu.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1420,7 +1284,6 @@ vuint8mf4_t test_vaaddu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
   return vaaddu_vx_u8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaaddu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1432,7 +1295,6 @@ vuint8mf2_t test_vaaddu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
   return vaaddu_vv_u8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaaddu.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1443,7 +1305,6 @@ vuint8mf2_t test_vaaddu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
   return vaaddu_vx_u8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaaddu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1454,7 +1315,6 @@ vuint8m1_t test_vaaddu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
   return vaaddu_vv_u8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaaddu.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1465,7 +1325,6 @@ vuint8m1_t test_vaaddu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
   return vaaddu_vx_u8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaaddu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1476,7 +1335,6 @@ vuint8m2_t test_vaaddu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
   return vaaddu_vv_u8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaaddu.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1487,7 +1345,6 @@ vuint8m2_t test_vaaddu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
   return vaaddu_vx_u8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaaddu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1498,7 +1355,6 @@ vuint8m4_t test_vaaddu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
   return vaaddu_vv_u8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaaddu.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1509,7 +1365,6 @@ vuint8m4_t test_vaaddu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
   return vaaddu_vx_u8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaaddu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1520,7 +1375,6 @@ vuint8m8_t test_vaaddu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
   return vaaddu_vv_u8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaaddu.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1531,7 +1385,6 @@ vuint8m8_t test_vaaddu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
   return vaaddu_vx_u8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaaddu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1543,7 +1396,6 @@ vuint16mf4_t test_vaaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
   return vaaddu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaaddu.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1555,7 +1407,6 @@ vuint16mf4_t test_vaaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
   return vaaddu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaaddu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1567,7 +1418,6 @@ vuint16mf2_t test_vaaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
   return vaaddu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaaddu.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1579,7 +1429,6 @@ vuint16mf2_t test_vaaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
   return vaaddu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaaddu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1591,7 +1440,6 @@ vuint16m1_t test_vaaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
   return vaaddu_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaaddu.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1602,7 +1450,6 @@ vuint16m1_t test_vaaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
   return vaaddu_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaaddu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1614,7 +1461,6 @@ vuint16m2_t test_vaaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
   return vaaddu_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaaddu.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1625,7 +1471,6 @@ vuint16m2_t test_vaaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
   return vaaddu_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaaddu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1637,7 +1482,6 @@ vuint16m4_t test_vaaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
   return vaaddu_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaaddu.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1648,7 +1492,6 @@ vuint16m4_t test_vaaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
   return vaaddu_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaaddu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1660,7 +1503,6 @@ vuint16m8_t test_vaaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
   return vaaddu_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaaddu.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1671,7 +1513,6 @@ vuint16m8_t test_vaaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
   return vaaddu_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1683,7 +1524,6 @@ vuint32mf2_t test_vaaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
   return vaaddu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1695,7 +1535,6 @@ vuint32mf2_t test_vaaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
   return vaaddu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaaddu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1707,7 +1546,6 @@ vuint32m1_t test_vaaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
   return vaaddu_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaaddu.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1718,7 +1556,6 @@ vuint32m1_t test_vaaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
   return vaaddu_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaaddu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1730,7 +1567,6 @@ vuint32m2_t test_vaaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
   return vaaddu_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaaddu.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1741,7 +1577,6 @@ vuint32m2_t test_vaaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
   return vaaddu_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaaddu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1753,7 +1588,6 @@ vuint32m4_t test_vaaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
   return vaaddu_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaaddu.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1764,7 +1598,6 @@ vuint32m4_t test_vaaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
   return vaaddu_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaaddu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1776,7 +1609,6 @@ vuint32m8_t test_vaaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
   return vaaddu_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaaddu.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1787,7 +1619,6 @@ vuint32m8_t test_vaaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
   return vaaddu_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1799,7 +1630,6 @@ vuint64m1_t test_vaaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
   return vaaddu_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaaddu.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1810,7 +1640,6 @@ vuint64m1_t test_vaaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
   return vaaddu_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaaddu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1822,7 +1651,6 @@ vuint64m2_t test_vaaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
   return vaaddu_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaaddu.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1833,7 +1661,6 @@ vuint64m2_t test_vaaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
   return vaaddu_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaaddu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1845,7 +1672,6 @@ vuint64m4_t test_vaaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
   return vaaddu_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaaddu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1856,7 +1682,6 @@ vuint64m4_t test_vaaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
   return vaaddu_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaaddu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1868,7 +1693,6 @@ vuint64m8_t test_vaaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
   return vaaddu_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaaddu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vadc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vadc.c
index e4dec50d2678a..03e4c29b552ec 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vadc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vadc.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vint8mf8_t test_vadc_vvm_i8mf8(vint8mf8_t op1, vint8mf8_t op2,
   return vadc_vvm_i8mf8(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -26,7 +24,6 @@ vint8mf8_t test_vadc_vxm_i8mf8(vint8mf8_t op1, int8_t op2, vbool64_t carryin,
   return vadc_vxm_i8mf8(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -37,7 +34,6 @@ vint8mf4_t test_vadc_vvm_i8mf4(vint8mf4_t op1, vint8mf4_t op2,
   return vadc_vvm_i8mf4(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -48,7 +44,6 @@ vint8mf4_t test_vadc_vxm_i8mf4(vint8mf4_t op1, int8_t op2, vbool32_t carryin,
   return vadc_vxm_i8mf4(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -59,7 +54,6 @@ vint8mf2_t test_vadc_vvm_i8mf2(vint8mf2_t op1, vint8mf2_t op2,
   return vadc_vvm_i8mf2(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -70,7 +64,6 @@ vint8mf2_t test_vadc_vxm_i8mf2(vint8mf2_t op1, int8_t op2, vbool16_t carryin,
   return vadc_vxm_i8mf2(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -81,7 +74,6 @@ vint8m1_t test_vadc_vvm_i8m1(vint8m1_t op1, vint8m1_t op2, vbool8_t carryin,
   return vadc_vvm_i8m1(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -92,7 +84,6 @@ vint8m1_t test_vadc_vxm_i8m1(vint8m1_t op1, int8_t op2, vbool8_t carryin,
   return vadc_vxm_i8m1(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -103,7 +94,6 @@ vint8m2_t test_vadc_vvm_i8m2(vint8m2_t op1, vint8m2_t op2, vbool4_t carryin,
   return vadc_vvm_i8m2(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -114,7 +104,6 @@ vint8m2_t test_vadc_vxm_i8m2(vint8m2_t op1, int8_t op2, vbool4_t carryin,
   return vadc_vxm_i8m2(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -125,7 +114,6 @@ vint8m4_t test_vadc_vvm_i8m4(vint8m4_t op1, vint8m4_t op2, vbool2_t carryin,
   return vadc_vvm_i8m4(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -136,7 +124,6 @@ vint8m4_t test_vadc_vxm_i8m4(vint8m4_t op1, int8_t op2, vbool2_t carryin,
   return vadc_vxm_i8m4(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -147,7 +134,6 @@ vint8m8_t test_vadc_vvm_i8m8(vint8m8_t op1, vint8m8_t op2, vbool1_t carryin,
   return vadc_vvm_i8m8(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -158,7 +144,6 @@ vint8m8_t test_vadc_vxm_i8m8(vint8m8_t op1, int8_t op2, vbool1_t carryin,
   return vadc_vxm_i8m8(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -169,7 +154,6 @@ vint16mf4_t test_vadc_vvm_i16mf4(vint16mf4_t op1, vint16mf4_t op2,
   return vadc_vvm_i16mf4(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -180,7 +164,6 @@ vint16mf4_t test_vadc_vxm_i16mf4(vint16mf4_t op1, int16_t op2,
   return vadc_vxm_i16mf4(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -191,7 +174,6 @@ vint16mf2_t test_vadc_vvm_i16mf2(vint16mf2_t op1, vint16mf2_t op2,
   return vadc_vvm_i16mf2(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -202,7 +184,6 @@ vint16mf2_t test_vadc_vxm_i16mf2(vint16mf2_t op1, int16_t op2,
   return vadc_vxm_i16mf2(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -213,7 +194,6 @@ vint16m1_t test_vadc_vvm_i16m1(vint16m1_t op1, vint16m1_t op2,
   return vadc_vvm_i16m1(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -224,7 +204,6 @@ vint16m1_t test_vadc_vxm_i16m1(vint16m1_t op1, int16_t op2, vbool16_t carryin,
   return vadc_vxm_i16m1(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -235,7 +214,6 @@ vint16m2_t test_vadc_vvm_i16m2(vint16m2_t op1, vint16m2_t op2, vbool8_t carryin,
   return vadc_vvm_i16m2(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -246,7 +224,6 @@ vint16m2_t test_vadc_vxm_i16m2(vint16m2_t op1, int16_t op2, vbool8_t carryin,
   return vadc_vxm_i16m2(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -257,7 +234,6 @@ vint16m4_t test_vadc_vvm_i16m4(vint16m4_t op1, vint16m4_t op2, vbool4_t carryin,
   return vadc_vvm_i16m4(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -268,7 +244,6 @@ vint16m4_t test_vadc_vxm_i16m4(vint16m4_t op1, int16_t op2, vbool4_t carryin,
   return vadc_vxm_i16m4(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -279,7 +254,6 @@ vint16m8_t test_vadc_vvm_i16m8(vint16m8_t op1, vint16m8_t op2, vbool2_t carryin,
   return vadc_vvm_i16m8(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -290,7 +264,6 @@ vint16m8_t test_vadc_vxm_i16m8(vint16m8_t op1, int16_t op2, vbool2_t carryin,
   return vadc_vxm_i16m8(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -301,7 +274,6 @@ vint32mf2_t test_vadc_vvm_i32mf2(vint32mf2_t op1, vint32mf2_t op2,
   return vadc_vvm_i32mf2(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -312,7 +284,6 @@ vint32mf2_t test_vadc_vxm_i32mf2(vint32mf2_t op1, int32_t op2,
   return vadc_vxm_i32mf2(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -323,7 +294,6 @@ vint32m1_t test_vadc_vvm_i32m1(vint32m1_t op1, vint32m1_t op2,
   return vadc_vvm_i32m1(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -334,7 +304,6 @@ vint32m1_t test_vadc_vxm_i32m1(vint32m1_t op1, int32_t op2, vbool32_t carryin,
   return vadc_vxm_i32m1(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -345,7 +314,6 @@ vint32m2_t test_vadc_vvm_i32m2(vint32m2_t op1, vint32m2_t op2,
   return vadc_vvm_i32m2(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -356,7 +324,6 @@ vint32m2_t test_vadc_vxm_i32m2(vint32m2_t op1, int32_t op2, vbool16_t carryin,
   return vadc_vxm_i32m2(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -367,7 +334,6 @@ vint32m4_t test_vadc_vvm_i32m4(vint32m4_t op1, vint32m4_t op2, vbool8_t carryin,
   return vadc_vvm_i32m4(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -378,7 +344,6 @@ vint32m4_t test_vadc_vxm_i32m4(vint32m4_t op1, int32_t op2, vbool8_t carryin,
   return vadc_vxm_i32m4(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -389,7 +354,6 @@ vint32m8_t test_vadc_vvm_i32m8(vint32m8_t op1, vint32m8_t op2, vbool4_t carryin,
   return vadc_vvm_i32m8(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -400,7 +364,6 @@ vint32m8_t test_vadc_vxm_i32m8(vint32m8_t op1, int32_t op2, vbool4_t carryin,
   return vadc_vxm_i32m8(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -411,7 +374,6 @@ vint64m1_t test_vadc_vvm_i64m1(vint64m1_t op1, vint64m1_t op2,
   return vadc_vvm_i64m1(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -422,7 +384,6 @@ vint64m1_t test_vadc_vxm_i64m1(vint64m1_t op1, int64_t op2, vbool64_t carryin,
   return vadc_vxm_i64m1(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -433,7 +394,6 @@ vint64m2_t test_vadc_vvm_i64m2(vint64m2_t op1, vint64m2_t op2,
   return vadc_vvm_i64m2(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -444,7 +404,6 @@ vint64m2_t test_vadc_vxm_i64m2(vint64m2_t op1, int64_t op2, vbool32_t carryin,
   return vadc_vxm_i64m2(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -455,7 +414,6 @@ vint64m4_t test_vadc_vvm_i64m4(vint64m4_t op1, vint64m4_t op2,
   return vadc_vvm_i64m4(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -466,7 +424,6 @@ vint64m4_t test_vadc_vxm_i64m4(vint64m4_t op1, int64_t op2, vbool16_t carryin,
   return vadc_vxm_i64m4(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -477,7 +434,6 @@ vint64m8_t test_vadc_vvm_i64m8(vint64m8_t op1, vint64m8_t op2, vbool8_t carryin,
   return vadc_vvm_i64m8(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -488,7 +444,6 @@ vint64m8_t test_vadc_vxm_i64m8(vint64m8_t op1, int64_t op2, vbool8_t carryin,
   return vadc_vxm_i64m8(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -499,7 +454,6 @@ vuint8mf8_t test_vadc_vvm_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2,
   return vadc_vvm_u8mf8(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -510,7 +464,6 @@ vuint8mf8_t test_vadc_vxm_u8mf8(vuint8mf8_t op1, uint8_t op2, vbool64_t carryin,
   return vadc_vxm_u8mf8(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -521,7 +474,6 @@ vuint8mf4_t test_vadc_vvm_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2,
   return vadc_vvm_u8mf4(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -532,7 +484,6 @@ vuint8mf4_t test_vadc_vxm_u8mf4(vuint8mf4_t op1, uint8_t op2, vbool32_t carryin,
   return vadc_vxm_u8mf4(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -543,7 +494,6 @@ vuint8mf2_t test_vadc_vvm_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2,
   return vadc_vvm_u8mf2(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -554,7 +504,6 @@ vuint8mf2_t test_vadc_vxm_u8mf2(vuint8mf2_t op1, uint8_t op2, vbool16_t carryin,
   return vadc_vxm_u8mf2(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -565,7 +514,6 @@ vuint8m1_t test_vadc_vvm_u8m1(vuint8m1_t op1, vuint8m1_t op2, vbool8_t carryin,
   return vadc_vvm_u8m1(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -576,7 +524,6 @@ vuint8m1_t test_vadc_vxm_u8m1(vuint8m1_t op1, uint8_t op2, vbool8_t carryin,
   return vadc_vxm_u8m1(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -587,7 +534,6 @@ vuint8m2_t test_vadc_vvm_u8m2(vuint8m2_t op1, vuint8m2_t op2, vbool4_t carryin,
   return vadc_vvm_u8m2(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -598,7 +544,6 @@ vuint8m2_t test_vadc_vxm_u8m2(vuint8m2_t op1, uint8_t op2, vbool4_t carryin,
   return vadc_vxm_u8m2(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -609,7 +554,6 @@ vuint8m4_t test_vadc_vvm_u8m4(vuint8m4_t op1, vuint8m4_t op2, vbool2_t carryin,
   return vadc_vvm_u8m4(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -620,7 +564,6 @@ vuint8m4_t test_vadc_vxm_u8m4(vuint8m4_t op1, uint8_t op2, vbool2_t carryin,
   return vadc_vxm_u8m4(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -631,7 +574,6 @@ vuint8m8_t test_vadc_vvm_u8m8(vuint8m8_t op1, vuint8m8_t op2, vbool1_t carryin,
   return vadc_vvm_u8m8(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -642,7 +584,6 @@ vuint8m8_t test_vadc_vxm_u8m8(vuint8m8_t op1, uint8_t op2, vbool1_t carryin,
   return vadc_vxm_u8m8(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -653,7 +594,6 @@ vuint16mf4_t test_vadc_vvm_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
   return vadc_vvm_u16mf4(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -664,7 +604,6 @@ vuint16mf4_t test_vadc_vxm_u16mf4(vuint16mf4_t op1, uint16_t op2,
   return vadc_vxm_u16mf4(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -675,7 +614,6 @@ vuint16mf2_t test_vadc_vvm_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
   return vadc_vvm_u16mf2(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -686,7 +624,6 @@ vuint16mf2_t test_vadc_vxm_u16mf2(vuint16mf2_t op1, uint16_t op2,
   return vadc_vxm_u16mf2(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -697,7 +634,6 @@ vuint16m1_t test_vadc_vvm_u16m1(vuint16m1_t op1, vuint16m1_t op2,
   return vadc_vvm_u16m1(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -708,7 +644,6 @@ vuint16m1_t test_vadc_vxm_u16m1(vuint16m1_t op1, uint16_t op2,
   return vadc_vxm_u16m1(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -719,7 +654,6 @@ vuint16m2_t test_vadc_vvm_u16m2(vuint16m2_t op1, vuint16m2_t op2,
   return vadc_vvm_u16m2(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -730,7 +664,6 @@ vuint16m2_t test_vadc_vxm_u16m2(vuint16m2_t op1, uint16_t op2, vbool8_t carryin,
   return vadc_vxm_u16m2(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -741,7 +674,6 @@ vuint16m4_t test_vadc_vvm_u16m4(vuint16m4_t op1, vuint16m4_t op2,
   return vadc_vvm_u16m4(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -752,7 +684,6 @@ vuint16m4_t test_vadc_vxm_u16m4(vuint16m4_t op1, uint16_t op2, vbool4_t carryin,
   return vadc_vxm_u16m4(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -763,7 +694,6 @@ vuint16m8_t test_vadc_vvm_u16m8(vuint16m8_t op1, vuint16m8_t op2,
   return vadc_vvm_u16m8(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -774,7 +704,6 @@ vuint16m8_t test_vadc_vxm_u16m8(vuint16m8_t op1, uint16_t op2, vbool2_t carryin,
   return vadc_vxm_u16m8(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -785,7 +714,6 @@ vuint32mf2_t test_vadc_vvm_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
   return vadc_vvm_u32mf2(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -796,7 +724,6 @@ vuint32mf2_t test_vadc_vxm_u32mf2(vuint32mf2_t op1, uint32_t op2,
   return vadc_vxm_u32mf2(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -807,7 +734,6 @@ vuint32m1_t test_vadc_vvm_u32m1(vuint32m1_t op1, vuint32m1_t op2,
   return vadc_vvm_u32m1(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -818,7 +744,6 @@ vuint32m1_t test_vadc_vxm_u32m1(vuint32m1_t op1, uint32_t op2,
   return vadc_vxm_u32m1(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -829,7 +754,6 @@ vuint32m2_t test_vadc_vvm_u32m2(vuint32m2_t op1, vuint32m2_t op2,
   return vadc_vvm_u32m2(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -840,7 +764,6 @@ vuint32m2_t test_vadc_vxm_u32m2(vuint32m2_t op1, uint32_t op2,
   return vadc_vxm_u32m2(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -851,7 +774,6 @@ vuint32m4_t test_vadc_vvm_u32m4(vuint32m4_t op1, vuint32m4_t op2,
   return vadc_vvm_u32m4(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -862,7 +784,6 @@ vuint32m4_t test_vadc_vxm_u32m4(vuint32m4_t op1, uint32_t op2, vbool8_t carryin,
   return vadc_vxm_u32m4(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -873,7 +794,6 @@ vuint32m8_t test_vadc_vvm_u32m8(vuint32m8_t op1, vuint32m8_t op2,
   return vadc_vvm_u32m8(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -884,7 +804,6 @@ vuint32m8_t test_vadc_vxm_u32m8(vuint32m8_t op1, uint32_t op2, vbool4_t carryin,
   return vadc_vxm_u32m8(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -895,7 +814,6 @@ vuint64m1_t test_vadc_vvm_u64m1(vuint64m1_t op1, vuint64m1_t op2,
   return vadc_vvm_u64m1(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -906,7 +824,6 @@ vuint64m1_t test_vadc_vxm_u64m1(vuint64m1_t op1, uint64_t op2,
   return vadc_vxm_u64m1(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -917,7 +834,6 @@ vuint64m2_t test_vadc_vvm_u64m2(vuint64m2_t op1, vuint64m2_t op2,
   return vadc_vvm_u64m2(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -928,7 +844,6 @@ vuint64m2_t test_vadc_vxm_u64m2(vuint64m2_t op1, uint64_t op2,
   return vadc_vxm_u64m2(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -939,7 +854,6 @@ vuint64m4_t test_vadc_vvm_u64m4(vuint64m4_t op1, vuint64m4_t op2,
   return vadc_vvm_u64m4(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -950,7 +864,6 @@ vuint64m4_t test_vadc_vxm_u64m4(vuint64m4_t op1, uint64_t op2,
   return vadc_vxm_u64m4(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -961,7 +874,6 @@ vuint64m8_t test_vadc_vvm_u64m8(vuint64m8_t op1, vuint64m8_t op2,
   return vadc_vvm_u64m8(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vadd.c
index c4375dbd18843..09f3d8577bbab 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vadd.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vint8mf8_t test_vadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
   return vadd_vv_i8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -25,7 +23,6 @@ vint8mf8_t test_vadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
   return vadd_vx_i8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -35,7 +32,6 @@ vint8mf4_t test_vadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
   return vadd_vv_i8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -45,7 +41,6 @@ vint8mf4_t test_vadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
   return vadd_vx_i8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -55,7 +50,6 @@ vint8mf2_t test_vadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
   return vadd_vv_i8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -65,7 +59,6 @@ vint8mf2_t test_vadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
   return vadd_vx_i8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -75,7 +68,6 @@ vint8m1_t test_vadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
   return vadd_vv_i8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -85,7 +77,6 @@ vint8m1_t test_vadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
   return vadd_vx_i8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -95,7 +86,6 @@ vint8m2_t test_vadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
   return vadd_vv_i8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -105,7 +95,6 @@ vint8m2_t test_vadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
   return vadd_vx_i8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -115,7 +104,6 @@ vint8m4_t test_vadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
   return vadd_vv_i8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -125,7 +113,6 @@ vint8m4_t test_vadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
   return vadd_vx_i8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -135,7 +122,6 @@ vint8m8_t test_vadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
   return vadd_vv_i8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -145,7 +131,6 @@ vint8m8_t test_vadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
   return vadd_vx_i8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -155,7 +140,6 @@ vint16mf4_t test_vadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
   return vadd_vv_i16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -165,7 +149,6 @@ vint16mf4_t test_vadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
   return vadd_vx_i16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -175,7 +158,6 @@ vint16mf2_t test_vadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
   return vadd_vv_i16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -185,7 +167,6 @@ vint16mf2_t test_vadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
   return vadd_vx_i16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -195,7 +176,6 @@ vint16m1_t test_vadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
   return vadd_vv_i16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -205,7 +185,6 @@ vint16m1_t test_vadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
   return vadd_vx_i16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -215,7 +194,6 @@ vint16m2_t test_vadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
   return vadd_vv_i16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -225,7 +203,6 @@ vint16m2_t test_vadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
   return vadd_vx_i16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -235,7 +212,6 @@ vint16m4_t test_vadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
   return vadd_vv_i16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -245,7 +221,6 @@ vint16m4_t test_vadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
   return vadd_vx_i16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -255,7 +230,6 @@ vint16m8_t test_vadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
   return vadd_vv_i16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -265,7 +239,6 @@ vint16m8_t test_vadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
   return vadd_vx_i16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -275,7 +248,6 @@ vint32mf2_t test_vadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
   return vadd_vv_i32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -285,7 +257,6 @@ vint32mf2_t test_vadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
   return vadd_vx_i32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -295,7 +266,6 @@ vint32m1_t test_vadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
   return vadd_vv_i32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -305,7 +275,6 @@ vint32m1_t test_vadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
   return vadd_vx_i32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -315,7 +284,6 @@ vint32m2_t test_vadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
   return vadd_vv_i32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -325,7 +293,6 @@ vint32m2_t test_vadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
   return vadd_vx_i32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -335,7 +302,6 @@ vint32m4_t test_vadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
   return vadd_vv_i32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -345,7 +311,6 @@ vint32m4_t test_vadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
   return vadd_vx_i32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -355,7 +320,6 @@ vint32m8_t test_vadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
   return vadd_vv_i32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -365,7 +329,6 @@ vint32m8_t test_vadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
   return vadd_vx_i32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -375,7 +338,6 @@ vint64m1_t test_vadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
   return vadd_vv_i64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -385,7 +347,6 @@ vint64m1_t test_vadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
   return vadd_vx_i64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -395,7 +356,6 @@ vint64m2_t test_vadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
   return vadd_vv_i64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -405,7 +365,6 @@ vint64m2_t test_vadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
   return vadd_vx_i64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -415,7 +374,6 @@ vint64m4_t test_vadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
   return vadd_vv_i64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -425,7 +383,6 @@ vint64m4_t test_vadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
   return vadd_vx_i64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -435,7 +392,6 @@ vint64m8_t test_vadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
   return vadd_vv_i64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -445,7 +401,6 @@ vint64m8_t test_vadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
   return vadd_vx_i64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -455,7 +410,6 @@ vuint8mf8_t test_vadd_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
   return vadd_vv_u8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -465,7 +419,6 @@ vuint8mf8_t test_vadd_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
   return vadd_vx_u8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -475,7 +428,6 @@ vuint8mf4_t test_vadd_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
   return vadd_vv_u8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -485,7 +437,6 @@ vuint8mf4_t test_vadd_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
   return vadd_vx_u8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -495,7 +446,6 @@ vuint8mf2_t test_vadd_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
   return vadd_vv_u8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -505,7 +455,6 @@ vuint8mf2_t test_vadd_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
   return vadd_vx_u8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -515,7 +464,6 @@ vuint8m1_t test_vadd_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vadd_vv_u8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -525,7 +473,6 @@ vuint8m1_t test_vadd_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
   return vadd_vx_u8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -535,7 +482,6 @@ vuint8m2_t test_vadd_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vadd_vv_u8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -545,7 +491,6 @@ vuint8m2_t test_vadd_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
   return vadd_vx_u8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -555,7 +500,6 @@ vuint8m4_t test_vadd_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vadd_vv_u8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -565,7 +509,6 @@ vuint8m4_t test_vadd_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
   return vadd_vx_u8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -575,7 +518,6 @@ vuint8m8_t test_vadd_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
   return vadd_vv_u8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -585,7 +527,6 @@ vuint8m8_t test_vadd_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
   return vadd_vx_u8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -595,7 +536,6 @@ vuint16mf4_t test_vadd_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
   return vadd_vv_u16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -605,7 +545,6 @@ vuint16mf4_t test_vadd_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
   return vadd_vx_u16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -615,7 +554,6 @@ vuint16mf2_t test_vadd_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
   return vadd_vv_u16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -625,7 +563,6 @@ vuint16mf2_t test_vadd_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
   return vadd_vx_u16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -635,7 +572,6 @@ vuint16m1_t test_vadd_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
   return vadd_vv_u16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -645,7 +581,6 @@ vuint16m1_t test_vadd_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
   return vadd_vx_u16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -655,7 +590,6 @@ vuint16m2_t test_vadd_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vadd_vv_u16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -665,7 +599,6 @@ vuint16m2_t test_vadd_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
   return vadd_vx_u16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -675,7 +608,6 @@ vuint16m4_t test_vadd_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vadd_vv_u16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -685,7 +617,6 @@ vuint16m4_t test_vadd_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
   return vadd_vx_u16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -695,7 +626,6 @@ vuint16m8_t test_vadd_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return vadd_vv_u16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -705,7 +635,6 @@ vuint16m8_t test_vadd_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
   return vadd_vx_u16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -715,7 +644,6 @@ vuint32mf2_t test_vadd_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
   return vadd_vv_u32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -725,7 +653,6 @@ vuint32mf2_t test_vadd_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
   return vadd_vx_u32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -735,7 +662,6 @@ vuint32m1_t test_vadd_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
   return vadd_vv_u32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -745,7 +671,6 @@ vuint32m1_t test_vadd_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
   return vadd_vx_u32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -755,7 +680,6 @@ vuint32m2_t test_vadd_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
   return vadd_vv_u32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -765,7 +689,6 @@ vuint32m2_t test_vadd_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vadd_vx_u32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -775,7 +698,6 @@ vuint32m4_t test_vadd_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vadd_vv_u32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -785,7 +707,6 @@ vuint32m4_t test_vadd_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vadd_vx_u32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -795,7 +716,6 @@ vuint32m8_t test_vadd_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
   return vadd_vv_u32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -805,7 +725,6 @@ vuint32m8_t test_vadd_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
   return vadd_vx_u32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -815,7 +734,6 @@ vuint64m1_t test_vadd_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
   return vadd_vv_u64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -825,7 +743,6 @@ vuint64m1_t test_vadd_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
   return vadd_vx_u64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -835,7 +752,6 @@ vuint64m2_t test_vadd_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
   return vadd_vv_u64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -845,7 +761,6 @@ vuint64m2_t test_vadd_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
   return vadd_vx_u64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -855,7 +770,6 @@ vuint64m4_t test_vadd_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
   return vadd_vv_u64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -865,7 +779,6 @@ vuint64m4_t test_vadd_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
   return vadd_vx_u64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -875,7 +788,6 @@ vuint64m8_t test_vadd_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
   return vadd_vv_u64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -885,7 +797,6 @@ vuint64m8_t test_vadd_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
   return vadd_vx_u64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -895,7 +806,6 @@ vint8mf8_t test_vadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t
   return vadd_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -905,7 +815,6 @@ vint8mf8_t test_vadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t
   return vadd_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -915,7 +824,6 @@ vint8mf4_t test_vadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t
   return vadd_vv_i8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -925,7 +833,6 @@ vint8mf4_t test_vadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t
   return vadd_vx_i8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -935,7 +842,6 @@ vint8mf2_t test_vadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t
   return vadd_vv_i8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -945,7 +851,6 @@ vint8mf2_t test_vadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t
   return vadd_vx_i8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -955,7 +860,6 @@ vint8m1_t test_vadd_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1,
   return vadd_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -965,7 +869,6 @@ vint8m1_t test_vadd_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1,
   return vadd_vx_i8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -975,7 +878,6 @@ vint8m2_t test_vadd_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1,
   return vadd_vv_i8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -985,7 +887,6 @@ vint8m2_t test_vadd_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1,
   return vadd_vx_i8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -995,7 +896,6 @@ vint8m4_t test_vadd_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1,
   return vadd_vv_i8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1005,7 +905,6 @@ vint8m4_t test_vadd_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1,
   return vadd_vx_i8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1015,7 +914,6 @@ vint8m8_t test_vadd_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1,
   return vadd_vv_i8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1025,7 +923,6 @@ vint8m8_t test_vadd_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1,
   return vadd_vx_i8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1035,7 +932,6 @@ vint16mf4_t test_vadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16m
   return vadd_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1045,7 +941,6 @@ vint16mf4_t test_vadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16m
   return vadd_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1055,7 +950,6 @@ vint16mf2_t test_vadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16m
   return vadd_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1065,7 +959,6 @@ vint16mf2_t test_vadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16m
   return vadd_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1075,7 +968,6 @@ vint16m1_t test_vadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t
   return vadd_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1085,7 +977,6 @@ vint16m1_t test_vadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t
   return vadd_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1095,7 +986,6 @@ vint16m2_t test_vadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
   return vadd_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1105,7 +995,6 @@ vint16m2_t test_vadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
   return vadd_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1115,7 +1004,6 @@ vint16m4_t test_vadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
   return vadd_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1125,7 +1013,6 @@ vint16m4_t test_vadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
   return vadd_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1135,7 +1022,6 @@ vint16m8_t test_vadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
   return vadd_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1145,7 +1031,6 @@ vint16m8_t test_vadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
   return vadd_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1155,7 +1040,6 @@ vint32mf2_t test_vadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32m
   return vadd_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1165,7 +1049,6 @@ vint32mf2_t test_vadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32m
   return vadd_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1175,7 +1058,6 @@ vint32m1_t test_vadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t
   return vadd_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1185,7 +1067,6 @@ vint32m1_t test_vadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t
   return vadd_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1195,7 +1076,6 @@ vint32m2_t test_vadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t
   return vadd_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1205,7 +1085,6 @@ vint32m2_t test_vadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t
   return vadd_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1215,7 +1094,6 @@ vint32m4_t test_vadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
   return vadd_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1225,7 +1103,6 @@ vint32m4_t test_vadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
   return vadd_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1235,7 +1112,6 @@ vint32m8_t test_vadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
   return vadd_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1245,7 +1121,6 @@ vint32m8_t test_vadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
   return vadd_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1255,7 +1130,6 @@ vint64m1_t test_vadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t
   return vadd_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1265,7 +1139,6 @@ vint64m1_t test_vadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t
   return vadd_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1275,7 +1148,6 @@ vint64m2_t test_vadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t
   return vadd_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1285,7 +1157,6 @@ vint64m2_t test_vadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t
   return vadd_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1295,7 +1166,6 @@ vint64m4_t test_vadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t
   return vadd_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1305,7 +1175,6 @@ vint64m4_t test_vadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t
   return vadd_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1315,7 +1184,6 @@ vint64m8_t test_vadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
   return vadd_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1325,7 +1193,6 @@ vint64m8_t test_vadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
   return vadd_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1335,7 +1202,6 @@ vuint8mf8_t test_vadd_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf
   return vadd_vv_u8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1345,7 +1211,6 @@ vuint8mf8_t test_vadd_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf
   return vadd_vx_u8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1355,7 +1220,6 @@ vuint8mf4_t test_vadd_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf
   return vadd_vv_u8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1365,7 +1229,6 @@ vuint8mf4_t test_vadd_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf
   return vadd_vx_u8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1375,7 +1238,6 @@ vuint8mf2_t test_vadd_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf
   return vadd_vv_u8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1385,7 +1247,6 @@ vuint8mf2_t test_vadd_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf
   return vadd_vx_u8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1395,7 +1256,6 @@ vuint8m1_t test_vadd_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t o
   return vadd_vv_u8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1405,7 +1265,6 @@ vuint8m1_t test_vadd_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t o
   return vadd_vx_u8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1415,7 +1274,6 @@ vuint8m2_t test_vadd_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t o
   return vadd_vv_u8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1425,7 +1283,6 @@ vuint8m2_t test_vadd_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t o
   return vadd_vx_u8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1435,7 +1292,6 @@ vuint8m4_t test_vadd_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t o
   return vadd_vv_u8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1445,7 +1301,6 @@ vuint8m4_t test_vadd_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t o
   return vadd_vx_u8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1455,7 +1310,6 @@ vuint8m8_t test_vadd_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t o
   return vadd_vv_u8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1465,7 +1319,6 @@ vuint8m8_t test_vadd_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t o
   return vadd_vx_u8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1475,7 +1328,6 @@ vuint16mf4_t test_vadd_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint
   return vadd_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1485,7 +1337,6 @@ vuint16mf4_t test_vadd_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint
   return vadd_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1495,7 +1346,6 @@ vuint16mf2_t test_vadd_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint
   return vadd_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1505,7 +1355,6 @@ vuint16mf2_t test_vadd_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint
   return vadd_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1515,7 +1364,6 @@ vuint16m1_t test_vadd_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m
   return vadd_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1525,7 +1373,6 @@ vuint16m1_t test_vadd_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m
   return vadd_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1535,7 +1382,6 @@ vuint16m2_t test_vadd_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2
   return vadd_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1545,7 +1391,6 @@ vuint16m2_t test_vadd_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2
   return vadd_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1555,7 +1400,6 @@ vuint16m4_t test_vadd_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4
   return vadd_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1565,7 +1409,6 @@ vuint16m4_t test_vadd_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4
   return vadd_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1575,7 +1418,6 @@ vuint16m8_t test_vadd_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8
   return vadd_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1585,7 +1427,6 @@ vuint16m8_t test_vadd_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8
   return vadd_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1595,7 +1436,6 @@ vuint32mf2_t test_vadd_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint
   return vadd_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1605,7 +1445,6 @@ vuint32mf2_t test_vadd_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint
   return vadd_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1615,7 +1454,6 @@ vuint32m1_t test_vadd_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m
   return vadd_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1625,7 +1463,6 @@ vuint32m1_t test_vadd_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m
   return vadd_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1635,7 +1472,6 @@ vuint32m2_t test_vadd_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m
   return vadd_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1645,7 +1481,6 @@ vuint32m2_t test_vadd_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m
   return vadd_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1655,7 +1490,6 @@ vuint32m4_t test_vadd_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4
   return vadd_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1665,7 +1499,6 @@ vuint32m4_t test_vadd_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4
   return vadd_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1675,7 +1508,6 @@ vuint32m8_t test_vadd_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8
   return vadd_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1685,7 +1517,6 @@ vuint32m8_t test_vadd_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8
   return vadd_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1695,7 +1526,6 @@ vuint64m1_t test_vadd_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m
   return vadd_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1705,7 +1535,6 @@ vuint64m1_t test_vadd_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m
   return vadd_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1715,7 +1544,6 @@ vuint64m2_t test_vadd_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m
   return vadd_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1725,7 +1553,6 @@ vuint64m2_t test_vadd_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m
   return vadd_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1735,7 +1562,6 @@ vuint64m4_t test_vadd_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m
   return vadd_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1745,7 +1571,6 @@ vuint64m4_t test_vadd_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m
   return vadd_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1755,7 +1580,6 @@ vuint64m8_t test_vadd_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8
   return vadd_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vamoadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vamoadd.c
index 82df30fd87698..c90fac2ecb4b8 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vamoadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vamoadd.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -15,7 +14,6 @@ vint32mf2_t test_vamoaddei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32m
   return vamoaddei8_v_i32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -26,7 +24,6 @@ vint32m1_t test_vamoaddei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_
   return vamoaddei8_v_i32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -37,7 +34,6 @@ vint32m2_t test_vamoaddei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_
   return vamoaddei8_v_i32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -48,7 +44,6 @@ vint32m4_t test_vamoaddei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t
   return vamoaddei8_v_i32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -59,7 +54,6 @@ vint32m8_t test_vamoaddei8_v_i32m8 (int32_t *base, vuint8m2_t bindex, vint32m8_t
   return vamoaddei8_v_i32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -70,7 +64,6 @@ vint32mf2_t test_vamoaddei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint3
   return vamoaddei16_v_i32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -81,7 +74,6 @@ vint32m1_t test_vamoaddei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m
   return vamoaddei16_v_i32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -92,7 +84,6 @@ vint32m2_t test_vamoaddei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2
   return vamoaddei16_v_i32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -103,7 +94,6 @@ vint32m4_t test_vamoaddei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4
   return vamoaddei16_v_i32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -114,7 +104,6 @@ vint32m8_t test_vamoaddei16_v_i32m8 (int32_t *base, vuint16m4_t bindex, vint32m8
   return vamoaddei16_v_i32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -125,7 +114,6 @@ vint32mf2_t test_vamoaddei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint3
   return vamoaddei32_v_i32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -136,7 +124,6 @@ vint32m1_t test_vamoaddei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1
   return vamoaddei32_v_i32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -147,7 +134,6 @@ vint32m2_t test_vamoaddei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2
   return vamoaddei32_v_i32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -158,7 +144,6 @@ vint32m4_t test_vamoaddei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4
   return vamoaddei32_v_i32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -169,7 +154,6 @@ vint32m8_t test_vamoaddei32_v_i32m8 (int32_t *base, vuint32m8_t bindex, vint32m8
   return vamoaddei32_v_i32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -180,7 +164,6 @@ vint32mf2_t test_vamoaddei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32
   return vamoaddei64_v_i32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -191,7 +174,6 @@ vint32m1_t test_vamoaddei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1
   return vamoaddei64_v_i32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -202,7 +184,6 @@ vint32m2_t test_vamoaddei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2
   return vamoaddei64_v_i32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -213,7 +194,6 @@ vint32m4_t test_vamoaddei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4
   return vamoaddei64_v_i32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -224,7 +204,6 @@ vint64m1_t test_vamoaddei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_
   return vamoaddei8_v_i64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -235,7 +214,6 @@ vint64m2_t test_vamoaddei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_
   return vamoaddei8_v_i64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -246,7 +224,6 @@ vint64m4_t test_vamoaddei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_
   return vamoaddei8_v_i64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -257,7 +234,6 @@ vint64m8_t test_vamoaddei8_v_i64m8 (int64_t *base, vuint8m1_t bindex, vint64m8_t
   return vamoaddei8_v_i64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -268,7 +244,6 @@ vint64m1_t test_vamoaddei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m
   return vamoaddei16_v_i64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -279,7 +254,6 @@ vint64m2_t test_vamoaddei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m
   return vamoaddei16_v_i64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -290,7 +264,6 @@ vint64m4_t test_vamoaddei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4
   return vamoaddei16_v_i64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -301,7 +274,6 @@ vint64m8_t test_vamoaddei16_v_i64m8 (int64_t *base, vuint16m2_t bindex, vint64m8
   return vamoaddei16_v_i64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -312,7 +284,6 @@ vint64m1_t test_vamoaddei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m
   return vamoaddei32_v_i64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -323,7 +294,6 @@ vint64m2_t test_vamoaddei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2
   return vamoaddei32_v_i64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -334,7 +304,6 @@ vint64m4_t test_vamoaddei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4
   return vamoaddei32_v_i64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -345,7 +314,6 @@ vint64m8_t test_vamoaddei32_v_i64m8 (int64_t *base, vuint32m4_t bindex, vint64m8
   return vamoaddei32_v_i64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -356,7 +324,6 @@ vint64m1_t test_vamoaddei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1
   return vamoaddei64_v_i64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -367,7 +334,6 @@ vint64m2_t test_vamoaddei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2
   return vamoaddei64_v_i64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -378,7 +344,6 @@ vint64m4_t test_vamoaddei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4
   return vamoaddei64_v_i64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -389,7 +354,6 @@ vint64m8_t test_vamoaddei64_v_i64m8 (int64_t *base, vuint64m8_t bindex, vint64m8
   return vamoaddei64_v_i64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -400,7 +364,6 @@ vuint32mf2_t test_vamoaddei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint
   return vamoaddei8_v_u32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -411,7 +374,6 @@ vuint32m1_t test_vamoaddei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32
   return vamoaddei8_v_u32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -422,7 +384,6 @@ vuint32m2_t test_vamoaddei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32
   return vamoaddei8_v_u32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -433,7 +394,6 @@ vuint32m4_t test_vamoaddei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32m
   return vamoaddei8_v_u32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -444,7 +404,6 @@ vuint32m8_t test_vamoaddei8_v_u32m8 (uint32_t *base, vuint8m2_t bindex, vuint32m
   return vamoaddei8_v_u32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -455,7 +414,6 @@ vuint32mf2_t test_vamoaddei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vui
   return vamoaddei16_v_u32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -466,7 +424,6 @@ vuint32m1_t test_vamoaddei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint
   return vamoaddei16_v_u32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -477,7 +434,6 @@ vuint32m2_t test_vamoaddei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint3
   return vamoaddei16_v_u32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -488,7 +444,6 @@ vuint32m4_t test_vamoaddei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint3
   return vamoaddei16_v_u32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -499,7 +454,6 @@ vuint32m8_t test_vamoaddei16_v_u32m8 (uint32_t *base, vuint16m4_t bindex, vuint3
   return vamoaddei16_v_u32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -510,7 +464,6 @@ vuint32mf2_t test_vamoaddei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vui
   return vamoaddei32_v_u32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -521,7 +474,6 @@ vuint32m1_t test_vamoaddei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint3
   return vamoaddei32_v_u32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -532,7 +484,6 @@ vuint32m2_t test_vamoaddei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint3
   return vamoaddei32_v_u32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -543,7 +494,6 @@ vuint32m4_t test_vamoaddei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint3
   return vamoaddei32_v_u32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -554,7 +504,6 @@ vuint32m8_t test_vamoaddei32_v_u32m8 (uint32_t *base, vuint32m8_t bindex, vuint3
   return vamoaddei32_v_u32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -565,7 +514,6 @@ vuint32mf2_t test_vamoaddei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuin
   return vamoaddei64_v_u32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -576,7 +524,6 @@ vuint32m1_t test_vamoaddei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint3
   return vamoaddei64_v_u32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -587,7 +534,6 @@ vuint32m2_t test_vamoaddei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint3
   return vamoaddei64_v_u32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -598,7 +544,6 @@ vuint32m4_t test_vamoaddei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint3
   return vamoaddei64_v_u32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -609,7 +554,6 @@ vuint64m1_t test_vamoaddei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64
   return vamoaddei8_v_u64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -620,7 +564,6 @@ vuint64m2_t test_vamoaddei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64
   return vamoaddei8_v_u64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -631,7 +574,6 @@ vuint64m4_t test_vamoaddei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint64
   return vamoaddei8_v_u64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -642,7 +584,6 @@ vuint64m8_t test_vamoaddei8_v_u64m8 (uint64_t *base, vuint8m1_t bindex, vuint64m
   return vamoaddei8_v_u64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -653,7 +594,6 @@ vuint64m1_t test_vamoaddei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint
   return vamoaddei16_v_u64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -664,7 +604,6 @@ vuint64m2_t test_vamoaddei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint
   return vamoaddei16_v_u64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -675,7 +614,6 @@ vuint64m4_t test_vamoaddei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint6
   return vamoaddei16_v_u64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -686,7 +624,6 @@ vuint64m8_t test_vamoaddei16_v_u64m8 (uint64_t *base, vuint16m2_t bindex, vuint6
   return vamoaddei16_v_u64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -697,7 +634,6 @@ vuint64m1_t test_vamoaddei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint
   return vamoaddei32_v_u64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -708,7 +644,6 @@ vuint64m2_t test_vamoaddei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint6
   return vamoaddei32_v_u64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -719,7 +654,6 @@ vuint64m4_t test_vamoaddei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint6
   return vamoaddei32_v_u64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -730,7 +664,6 @@ vuint64m8_t test_vamoaddei32_v_u64m8 (uint64_t *base, vuint32m4_t bindex, vuint6
   return vamoaddei32_v_u64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -741,7 +674,6 @@ vuint64m1_t test_vamoaddei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint6
   return vamoaddei64_v_u64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -752,7 +684,6 @@ vuint64m2_t test_vamoaddei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint6
   return vamoaddei64_v_u64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -763,7 +694,6 @@ vuint64m4_t test_vamoaddei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint6
   return vamoaddei64_v_u64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -774,7 +704,6 @@ vuint64m8_t test_vamoaddei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint6
   return vamoaddei64_v_u64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -785,7 +714,6 @@ vint32mf2_t test_vamoaddei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8
   return vamoaddei8_v_i32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -796,7 +724,6 @@ vint32m1_t test_vamoaddei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t
   return vamoaddei8_v_i32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -807,7 +734,6 @@ vint32m2_t test_vamoaddei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t
   return vamoaddei8_v_i32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -818,7 +744,6 @@ vint32m4_t test_vamoaddei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t b
   return vamoaddei8_v_i32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -829,7 +754,6 @@ vint32m8_t test_vamoaddei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t b
   return vamoaddei8_v_i32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -840,7 +764,6 @@ vint32mf2_t test_vamoaddei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16m
   return vamoaddei16_v_i32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -851,7 +774,6 @@ vint32m1_t test_vamoaddei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2
   return vamoaddei16_v_i32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -862,7 +784,6 @@ vint32m2_t test_vamoaddei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_
   return vamoaddei16_v_i32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -873,7 +794,6 @@ vint32m4_t test_vamoaddei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t
   return vamoaddei16_v_i32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -884,7 +804,6 @@ vint32m8_t test_vamoaddei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t
   return vamoaddei16_v_i32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -895,7 +814,6 @@ vint32mf2_t test_vamoaddei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32m
   return vamoaddei32_v_i32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -906,7 +824,6 @@ vint32m1_t test_vamoaddei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_
   return vamoaddei32_v_i32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -917,7 +834,6 @@ vint32m2_t test_vamoaddei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_
   return vamoaddei32_v_i32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -928,7 +844,6 @@ vint32m4_t test_vamoaddei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t
   return vamoaddei32_v_i32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -939,7 +854,6 @@ vint32m8_t test_vamoaddei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t
   return vamoaddei32_v_i32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -950,7 +864,6 @@ vint32mf2_t test_vamoaddei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m
   return vamoaddei64_v_i32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -961,7 +874,6 @@ vint32m1_t test_vamoaddei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_
   return vamoaddei64_v_i32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -972,7 +884,6 @@ vint32m2_t test_vamoaddei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_
   return vamoaddei64_v_i32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -983,7 +894,6 @@ vint32m4_t test_vamoaddei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t
   return vamoaddei64_v_i32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -994,7 +904,6 @@ vint64m1_t test_vamoaddei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t
   return vamoaddei8_v_i64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1005,7 +914,6 @@ vint64m2_t test_vamoaddei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t
   return vamoaddei8_v_i64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1016,7 +924,6 @@ vint64m4_t test_vamoaddei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t
   return vamoaddei8_v_i64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1027,7 +934,6 @@ vint64m8_t test_vamoaddei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t b
   return vamoaddei8_v_i64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1038,7 +944,6 @@ vint64m1_t test_vamoaddei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4
   return vamoaddei16_v_i64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1049,7 +954,6 @@ vint64m2_t test_vamoaddei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2
   return vamoaddei16_v_i64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1060,7 +964,6 @@ vint64m4_t test_vamoaddei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_
   return vamoaddei16_v_i64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1071,7 +974,6 @@ vint64m8_t test_vamoaddei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t
   return vamoaddei16_v_i64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1082,7 +984,6 @@ vint64m1_t test_vamoaddei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2
   return vamoaddei32_v_i64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1093,7 +994,6 @@ vint64m2_t test_vamoaddei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_
   return vamoaddei32_v_i64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1104,7 +1004,6 @@ vint64m4_t test_vamoaddei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_
   return vamoaddei32_v_i64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1115,7 +1014,6 @@ vint64m8_t test_vamoaddei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t
   return vamoaddei32_v_i64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1126,7 +1024,6 @@ vint64m1_t test_vamoaddei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_
   return vamoaddei64_v_i64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1137,7 +1034,6 @@ vint64m2_t test_vamoaddei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_
   return vamoaddei64_v_i64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1148,7 +1044,6 @@ vint64m4_t test_vamoaddei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_
   return vamoaddei64_v_i64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1159,7 +1054,6 @@ vint64m8_t test_vamoaddei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t
   return vamoaddei64_v_i64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1170,7 +1064,6 @@ vuint32mf2_t test_vamoaddei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8m
   return vamoaddei8_v_u32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1181,7 +1074,6 @@ vuint32m1_t test_vamoaddei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4
   return vamoaddei8_v_u32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1192,7 +1084,6 @@ vuint32m2_t test_vamoaddei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2
   return vamoaddei8_v_u32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1203,7 +1094,6 @@ vuint32m4_t test_vamoaddei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t
   return vamoaddei8_v_u32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1214,7 +1104,6 @@ vuint32m8_t test_vamoaddei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t
   return vamoaddei8_v_u32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1225,7 +1114,6 @@ vuint32mf2_t test_vamoaddei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint1
   return vamoaddei16_v_u32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1236,7 +1124,6 @@ vuint32m1_t test_vamoaddei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16m
   return vamoaddei16_v_u32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1247,7 +1134,6 @@ vuint32m2_t test_vamoaddei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m
   return vamoaddei16_v_u32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1258,7 +1144,6 @@ vuint32m4_t test_vamoaddei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2
   return vamoaddei16_v_u32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1269,7 +1154,6 @@ vuint32m8_t test_vamoaddei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4
   return vamoaddei16_v_u32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1280,7 +1164,6 @@ vuint32mf2_t test_vamoaddei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint3
   return vamoaddei32_v_u32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1291,7 +1174,6 @@ vuint32m1_t test_vamoaddei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m
   return vamoaddei32_v_u32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1302,7 +1184,6 @@ vuint32m2_t test_vamoaddei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m
   return vamoaddei32_v_u32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1313,7 +1194,6 @@ vuint32m4_t test_vamoaddei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4
   return vamoaddei32_v_u32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1324,7 +1204,6 @@ vuint32m8_t test_vamoaddei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8
   return vamoaddei32_v_u32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1335,7 +1214,6 @@ vuint32mf2_t test_vamoaddei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint6
   return vamoaddei64_v_u32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1346,7 +1224,6 @@ vuint32m1_t test_vamoaddei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m
   return vamoaddei64_v_u32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1357,7 +1234,6 @@ vuint32m2_t test_vamoaddei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m
   return vamoaddei64_v_u32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1368,7 +1244,6 @@ vuint32m4_t test_vamoaddei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8
   return vamoaddei64_v_u32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1379,7 +1254,6 @@ vuint64m1_t test_vamoaddei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8
   return vamoaddei8_v_u64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1390,7 +1264,6 @@ vuint64m2_t test_vamoaddei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4
   return vamoaddei8_v_u64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1401,7 +1274,6 @@ vuint64m4_t test_vamoaddei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2
   return vamoaddei8_v_u64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1412,7 +1284,6 @@ vuint64m8_t test_vamoaddei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t
   return vamoaddei8_v_u64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1423,7 +1294,6 @@ vuint64m1_t test_vamoaddei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16m
   return vamoaddei16_v_u64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1434,7 +1304,6 @@ vuint64m2_t test_vamoaddei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16m
   return vamoaddei16_v_u64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1445,7 +1314,6 @@ vuint64m4_t test_vamoaddei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m
   return vamoaddei16_v_u64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1456,7 +1324,6 @@ vuint64m8_t test_vamoaddei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2
   return vamoaddei16_v_u64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1467,7 +1334,6 @@ vuint64m1_t test_vamoaddei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32m
   return vamoaddei32_v_u64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1478,7 +1344,6 @@ vuint64m2_t test_vamoaddei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m
   return vamoaddei32_v_u64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1489,7 +1354,6 @@ vuint64m4_t test_vamoaddei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m
   return vamoaddei32_v_u64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1500,7 +1364,6 @@ vuint64m8_t test_vamoaddei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4
   return vamoaddei32_v_u64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1511,7 +1374,6 @@ vuint64m1_t test_vamoaddei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m
   return vamoaddei64_v_u64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1522,7 +1384,6 @@ vuint64m2_t test_vamoaddei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m
   return vamoaddei64_v_u64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1533,7 +1394,6 @@ vuint64m4_t test_vamoaddei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m
   return vamoaddei64_v_u64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vamoand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vamoand.c
index 1eeb3f39ea0fc..abc9083c86f18 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vamoand.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vamoand.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -15,7 +14,6 @@ vint32mf2_t test_vamoandei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32m
   return vamoandei8_v_i32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -26,7 +24,6 @@ vint32m1_t test_vamoandei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_
   return vamoandei8_v_i32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -37,7 +34,6 @@ vint32m2_t test_vamoandei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_
   return vamoandei8_v_i32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -48,7 +44,6 @@ vint32m4_t test_vamoandei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t
   return vamoandei8_v_i32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -59,7 +54,6 @@ vint32m8_t test_vamoandei8_v_i32m8 (int32_t *base, vuint8m2_t bindex, vint32m8_t
   return vamoandei8_v_i32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -70,7 +64,6 @@ vint32mf2_t test_vamoandei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint3
   return vamoandei16_v_i32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -81,7 +74,6 @@ vint32m1_t test_vamoandei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m
   return vamoandei16_v_i32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -92,7 +84,6 @@ vint32m2_t test_vamoandei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2
   return vamoandei16_v_i32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -103,7 +94,6 @@ vint32m4_t test_vamoandei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4
   return vamoandei16_v_i32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -114,7 +104,6 @@ vint32m8_t test_vamoandei16_v_i32m8 (int32_t *base, vuint16m4_t bindex, vint32m8
   return vamoandei16_v_i32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -125,7 +114,6 @@ vint32mf2_t test_vamoandei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint3
   return vamoandei32_v_i32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -136,7 +124,6 @@ vint32m1_t test_vamoandei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1
   return vamoandei32_v_i32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -147,7 +134,6 @@ vint32m2_t test_vamoandei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2
   return vamoandei32_v_i32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -158,7 +144,6 @@ vint32m4_t test_vamoandei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4
   return vamoandei32_v_i32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -169,7 +154,6 @@ vint32m8_t test_vamoandei32_v_i32m8 (int32_t *base, vuint32m8_t bindex, vint32m8
   return vamoandei32_v_i32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -180,7 +164,6 @@ vint32mf2_t test_vamoandei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32
   return vamoandei64_v_i32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -191,7 +174,6 @@ vint32m1_t test_vamoandei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1
   return vamoandei64_v_i32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -202,7 +184,6 @@ vint32m2_t test_vamoandei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2
   return vamoandei64_v_i32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -213,7 +194,6 @@ vint32m4_t test_vamoandei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4
   return vamoandei64_v_i32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -224,7 +204,6 @@ vint64m1_t test_vamoandei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_
   return vamoandei8_v_i64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -235,7 +214,6 @@ vint64m2_t test_vamoandei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_
   return vamoandei8_v_i64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -246,7 +224,6 @@ vint64m4_t test_vamoandei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_
   return vamoandei8_v_i64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -257,7 +234,6 @@ vint64m8_t test_vamoandei8_v_i64m8 (int64_t *base, vuint8m1_t bindex, vint64m8_t
   return vamoandei8_v_i64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -268,7 +244,6 @@ vint64m1_t test_vamoandei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m
   return vamoandei16_v_i64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -279,7 +254,6 @@ vint64m2_t test_vamoandei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m
   return vamoandei16_v_i64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -290,7 +264,6 @@ vint64m4_t test_vamoandei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4
   return vamoandei16_v_i64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -301,7 +274,6 @@ vint64m8_t test_vamoandei16_v_i64m8 (int64_t *base, vuint16m2_t bindex, vint64m8
   return vamoandei16_v_i64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -312,7 +284,6 @@ vint64m1_t test_vamoandei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m
   return vamoandei32_v_i64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -323,7 +294,6 @@ vint64m2_t test_vamoandei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2
   return vamoandei32_v_i64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -334,7 +304,6 @@ vint64m4_t test_vamoandei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4
   return vamoandei32_v_i64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -345,7 +314,6 @@ vint64m8_t test_vamoandei32_v_i64m8 (int64_t *base, vuint32m4_t bindex, vint64m8
   return vamoandei32_v_i64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -356,7 +324,6 @@ vint64m1_t test_vamoandei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1
   return vamoandei64_v_i64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -367,7 +334,6 @@ vint64m2_t test_vamoandei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2
   return vamoandei64_v_i64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -378,7 +344,6 @@ vint64m4_t test_vamoandei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4
   return vamoandei64_v_i64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -389,7 +354,6 @@ vint64m8_t test_vamoandei64_v_i64m8 (int64_t *base, vuint64m8_t bindex, vint64m8
   return vamoandei64_v_i64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -400,7 +364,6 @@ vuint32mf2_t test_vamoandei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint
   return vamoandei8_v_u32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -411,7 +374,6 @@ vuint32m1_t test_vamoandei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32
   return vamoandei8_v_u32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -422,7 +384,6 @@ vuint32m2_t test_vamoandei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32
   return vamoandei8_v_u32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -433,7 +394,6 @@ vuint32m4_t test_vamoandei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32m
   return vamoandei8_v_u32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -444,7 +404,6 @@ vuint32m8_t test_vamoandei8_v_u32m8 (uint32_t *base, vuint8m2_t bindex, vuint32m
   return vamoandei8_v_u32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -455,7 +414,6 @@ vuint32mf2_t test_vamoandei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vui
   return vamoandei16_v_u32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -466,7 +424,6 @@ vuint32m1_t test_vamoandei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint
   return vamoandei16_v_u32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -477,7 +434,6 @@ vuint32m2_t test_vamoandei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint3
   return vamoandei16_v_u32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -488,7 +444,6 @@ vuint32m4_t test_vamoandei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint3
   return vamoandei16_v_u32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -499,7 +454,6 @@ vuint32m8_t test_vamoandei16_v_u32m8 (uint32_t *base, vuint16m4_t bindex, vuint3
   return vamoandei16_v_u32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -510,7 +464,6 @@ vuint32mf2_t test_vamoandei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vui
   return vamoandei32_v_u32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -521,7 +474,6 @@ vuint32m1_t test_vamoandei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint3
   return vamoandei32_v_u32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -532,7 +484,6 @@ vuint32m2_t test_vamoandei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint3
   return vamoandei32_v_u32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -543,7 +494,6 @@ vuint32m4_t test_vamoandei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint3
   return vamoandei32_v_u32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -554,7 +504,6 @@ vuint32m8_t test_vamoandei32_v_u32m8 (uint32_t *base, vuint32m8_t bindex, vuint3
   return vamoandei32_v_u32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -565,7 +514,6 @@ vuint32mf2_t test_vamoandei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuin
   return vamoandei64_v_u32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -576,7 +524,6 @@ vuint32m1_t test_vamoandei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint3
   return vamoandei64_v_u32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -587,7 +534,6 @@ vuint32m2_t test_vamoandei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint3
   return vamoandei64_v_u32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -598,7 +544,6 @@ vuint32m4_t test_vamoandei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint3
   return vamoandei64_v_u32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -609,7 +554,6 @@ vuint64m1_t test_vamoandei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64
   return vamoandei8_v_u64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -620,7 +564,6 @@ vuint64m2_t test_vamoandei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64
   return vamoandei8_v_u64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -631,7 +574,6 @@ vuint64m4_t test_vamoandei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint64
   return vamoandei8_v_u64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -642,7 +584,6 @@ vuint64m8_t test_vamoandei8_v_u64m8 (uint64_t *base, vuint8m1_t bindex, vuint64m
   return vamoandei8_v_u64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -653,7 +594,6 @@ vuint64m1_t test_vamoandei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint
   return vamoandei16_v_u64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -664,7 +604,6 @@ vuint64m2_t test_vamoandei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint
   return vamoandei16_v_u64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -675,7 +614,6 @@ vuint64m4_t test_vamoandei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint6
   return vamoandei16_v_u64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -686,7 +624,6 @@ vuint64m8_t test_vamoandei16_v_u64m8 (uint64_t *base, vuint16m2_t bindex, vuint6
   return vamoandei16_v_u64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -697,7 +634,6 @@ vuint64m1_t test_vamoandei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint
   return vamoandei32_v_u64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -708,7 +644,6 @@ vuint64m2_t test_vamoandei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint6
   return vamoandei32_v_u64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -719,7 +654,6 @@ vuint64m4_t test_vamoandei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint6
   return vamoandei32_v_u64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -730,7 +664,6 @@ vuint64m8_t test_vamoandei32_v_u64m8 (uint64_t *base, vuint32m4_t bindex, vuint6
   return vamoandei32_v_u64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -741,7 +674,6 @@ vuint64m1_t test_vamoandei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint6
   return vamoandei64_v_u64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -752,7 +684,6 @@ vuint64m2_t test_vamoandei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint6
   return vamoandei64_v_u64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -763,7 +694,6 @@ vuint64m4_t test_vamoandei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint6
   return vamoandei64_v_u64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -774,7 +704,6 @@ vuint64m8_t test_vamoandei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint6
   return vamoandei64_v_u64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -785,7 +714,6 @@ vint32mf2_t test_vamoandei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8
   return vamoandei8_v_i32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -796,7 +724,6 @@ vint32m1_t test_vamoandei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t
   return vamoandei8_v_i32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -807,7 +734,6 @@ vint32m2_t test_vamoandei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t
   return vamoandei8_v_i32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -818,7 +744,6 @@ vint32m4_t test_vamoandei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t b
   return vamoandei8_v_i32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -829,7 +754,6 @@ vint32m8_t test_vamoandei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t b
   return vamoandei8_v_i32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -840,7 +764,6 @@ vint32mf2_t test_vamoandei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16m
   return vamoandei16_v_i32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -851,7 +774,6 @@ vint32m1_t test_vamoandei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2
   return vamoandei16_v_i32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -862,7 +784,6 @@ vint32m2_t test_vamoandei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_
   return vamoandei16_v_i32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -873,7 +794,6 @@ vint32m4_t test_vamoandei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t
   return vamoandei16_v_i32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -884,7 +804,6 @@ vint32m8_t test_vamoandei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t
   return vamoandei16_v_i32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -895,7 +814,6 @@ vint32mf2_t test_vamoandei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32m
   return vamoandei32_v_i32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -906,7 +824,6 @@ vint32m1_t test_vamoandei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_
   return vamoandei32_v_i32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -917,7 +834,6 @@ vint32m2_t test_vamoandei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_
   return vamoandei32_v_i32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -928,7 +844,6 @@ vint32m4_t test_vamoandei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t
   return vamoandei32_v_i32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -939,7 +854,6 @@ vint32m8_t test_vamoandei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t
   return vamoandei32_v_i32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -950,7 +864,6 @@ vint32mf2_t test_vamoandei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m
   return vamoandei64_v_i32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -961,7 +874,6 @@ vint32m1_t test_vamoandei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_
   return vamoandei64_v_i32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -972,7 +884,6 @@ vint32m2_t test_vamoandei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_
   return vamoandei64_v_i32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -983,7 +894,6 @@ vint32m4_t test_vamoandei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t
   return vamoandei64_v_i32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -994,7 +904,6 @@ vint64m1_t test_vamoandei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t
   return vamoandei8_v_i64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1005,7 +914,6 @@ vint64m2_t test_vamoandei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t
   return vamoandei8_v_i64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1016,7 +924,6 @@ vint64m4_t test_vamoandei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t
   return vamoandei8_v_i64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1027,7 +934,6 @@ vint64m8_t test_vamoandei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t b
   return vamoandei8_v_i64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1038,7 +944,6 @@ vint64m1_t test_vamoandei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4
   return vamoandei16_v_i64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1049,7 +954,6 @@ vint64m2_t test_vamoandei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2
   return vamoandei16_v_i64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1060,7 +964,6 @@ vint64m4_t test_vamoandei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_
   return vamoandei16_v_i64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1071,7 +974,6 @@ vint64m8_t test_vamoandei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t
   return vamoandei16_v_i64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1082,7 +984,6 @@ vint64m1_t test_vamoandei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2
   return vamoandei32_v_i64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1093,7 +994,6 @@ vint64m2_t test_vamoandei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_
   return vamoandei32_v_i64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1104,7 +1004,6 @@ vint64m4_t test_vamoandei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_
   return vamoandei32_v_i64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1115,7 +1014,6 @@ vint64m8_t test_vamoandei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t
   return vamoandei32_v_i64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1126,7 +1024,6 @@ vint64m1_t test_vamoandei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_
   return vamoandei64_v_i64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1137,7 +1034,6 @@ vint64m2_t test_vamoandei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_
   return vamoandei64_v_i64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1148,7 +1044,6 @@ vint64m4_t test_vamoandei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_
   return vamoandei64_v_i64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1159,7 +1054,6 @@ vint64m8_t test_vamoandei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t
   return vamoandei64_v_i64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1170,7 +1064,6 @@ vuint32mf2_t test_vamoandei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8m
   return vamoandei8_v_u32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1181,7 +1074,6 @@ vuint32m1_t test_vamoandei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4
   return vamoandei8_v_u32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1192,7 +1084,6 @@ vuint32m2_t test_vamoandei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2
   return vamoandei8_v_u32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1203,7 +1094,6 @@ vuint32m4_t test_vamoandei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t
   return vamoandei8_v_u32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1214,7 +1104,6 @@ vuint32m8_t test_vamoandei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t
   return vamoandei8_v_u32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1225,7 +1114,6 @@ vuint32mf2_t test_vamoandei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint1
   return vamoandei16_v_u32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1236,7 +1124,6 @@ vuint32m1_t test_vamoandei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16m
   return vamoandei16_v_u32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1247,7 +1134,6 @@ vuint32m2_t test_vamoandei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m
   return vamoandei16_v_u32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1258,7 +1144,6 @@ vuint32m4_t test_vamoandei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2
   return vamoandei16_v_u32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1269,7 +1154,6 @@ vuint32m8_t test_vamoandei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4
   return vamoandei16_v_u32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1280,7 +1164,6 @@ vuint32mf2_t test_vamoandei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint3
   return vamoandei32_v_u32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1291,7 +1174,6 @@ vuint32m1_t test_vamoandei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m
   return vamoandei32_v_u32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1302,7 +1184,6 @@ vuint32m2_t test_vamoandei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m
   return vamoandei32_v_u32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1313,7 +1194,6 @@ vuint32m4_t test_vamoandei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4
   return vamoandei32_v_u32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1324,7 +1204,6 @@ vuint32m8_t test_vamoandei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8
   return vamoandei32_v_u32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1335,7 +1214,6 @@ vuint32mf2_t test_vamoandei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint6
   return vamoandei64_v_u32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1346,7 +1224,6 @@ vuint32m1_t test_vamoandei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m
   return vamoandei64_v_u32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1357,7 +1234,6 @@ vuint32m2_t test_vamoandei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m
   return vamoandei64_v_u32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1368,7 +1244,6 @@ vuint32m4_t test_vamoandei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8
   return vamoandei64_v_u32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1379,7 +1254,6 @@ vuint64m1_t test_vamoandei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8
   return vamoandei8_v_u64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1390,7 +1264,6 @@ vuint64m2_t test_vamoandei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4
   return vamoandei8_v_u64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1401,7 +1274,6 @@ vuint64m4_t test_vamoandei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2
   return vamoandei8_v_u64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1412,7 +1284,6 @@ vuint64m8_t test_vamoandei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t
   return vamoandei8_v_u64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1423,7 +1294,6 @@ vuint64m1_t test_vamoandei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16m
   return vamoandei16_v_u64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1434,7 +1304,6 @@ vuint64m2_t test_vamoandei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16m
   return vamoandei16_v_u64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1445,7 +1314,6 @@ vuint64m4_t test_vamoandei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m
   return vamoandei16_v_u64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1456,7 +1324,6 @@ vuint64m8_t test_vamoandei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2
   return vamoandei16_v_u64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1467,7 +1334,6 @@ vuint64m1_t test_vamoandei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32m
   return vamoandei32_v_u64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1478,7 +1344,6 @@ vuint64m2_t test_vamoandei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m
   return vamoandei32_v_u64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1489,7 +1354,6 @@ vuint64m4_t test_vamoandei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m
   return vamoandei32_v_u64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1500,7 +1364,6 @@ vuint64m8_t test_vamoandei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4
   return vamoandei32_v_u64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1511,7 +1374,6 @@ vuint64m1_t test_vamoandei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m
   return vamoandei64_v_u64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1522,7 +1384,6 @@ vuint64m2_t test_vamoandei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m
   return vamoandei64_v_u64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1533,7 +1394,6 @@ vuint64m4_t test_vamoandei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m
   return vamoandei64_v_u64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vamomax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vamomax.c
index 4dcdea29b3216..486d9ad732941 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vamomax.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vamomax.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei8_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -15,7 +14,6 @@ vint32mf2_t test_vamomaxei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32m
   return vamomaxei8_v_i32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei8_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -26,7 +24,6 @@ vint32m1_t test_vamomaxei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_
   return vamomaxei8_v_i32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei8_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -37,7 +34,6 @@ vint32m2_t test_vamomaxei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_
   return vamomaxei8_v_i32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei8_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -48,7 +44,6 @@ vint32m4_t test_vamomaxei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t
   return vamomaxei8_v_i32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei8_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -59,7 +54,6 @@ vint32m8_t test_vamomaxei8_v_i32m8 (int32_t *base, vuint8m2_t bindex, vint32m8_t
   return vamomaxei8_v_i32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei16_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -70,7 +64,6 @@ vint32mf2_t test_vamomaxei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint3
   return vamomaxei16_v_i32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei16_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -81,7 +74,6 @@ vint32m1_t test_vamomaxei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m
   return vamomaxei16_v_i32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei16_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -92,7 +84,6 @@ vint32m2_t test_vamomaxei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2
   return vamomaxei16_v_i32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei16_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -103,7 +94,6 @@ vint32m4_t test_vamomaxei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4
   return vamomaxei16_v_i32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei16_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -114,7 +104,6 @@ vint32m8_t test_vamomaxei16_v_i32m8 (int32_t *base, vuint16m4_t bindex, vint32m8
   return vamomaxei16_v_i32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei32_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -125,7 +114,6 @@ vint32mf2_t test_vamomaxei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint3
   return vamomaxei32_v_i32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei32_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -136,7 +124,6 @@ vint32m1_t test_vamomaxei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1
   return vamomaxei32_v_i32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei32_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -147,7 +134,6 @@ vint32m2_t test_vamomaxei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2
   return vamomaxei32_v_i32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei32_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -158,7 +144,6 @@ vint32m4_t test_vamomaxei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4
   return vamomaxei32_v_i32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei32_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -169,7 +154,6 @@ vint32m8_t test_vamomaxei32_v_i32m8 (int32_t *base, vuint32m8_t bindex, vint32m8
   return vamomaxei32_v_i32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei64_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -180,7 +164,6 @@ vint32mf2_t test_vamomaxei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32
   return vamomaxei64_v_i32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei64_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -191,7 +174,6 @@ vint32m1_t test_vamomaxei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1
   return vamomaxei64_v_i32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei64_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -202,7 +184,6 @@ vint32m2_t test_vamomaxei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2
   return vamomaxei64_v_i32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei64_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -213,7 +194,6 @@ vint32m4_t test_vamomaxei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4
   return vamomaxei64_v_i32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei8_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -224,7 +204,6 @@ vint64m1_t test_vamomaxei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_
   return vamomaxei8_v_i64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei8_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -235,7 +214,6 @@ vint64m2_t test_vamomaxei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_
   return vamomaxei8_v_i64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei8_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -246,7 +224,6 @@ vint64m4_t test_vamomaxei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_
   return vamomaxei8_v_i64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei8_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -257,7 +234,6 @@ vint64m8_t test_vamomaxei8_v_i64m8 (int64_t *base, vuint8m1_t bindex, vint64m8_t
   return vamomaxei8_v_i64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei16_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -268,7 +244,6 @@ vint64m1_t test_vamomaxei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m
   return vamomaxei16_v_i64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei16_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -279,7 +254,6 @@ vint64m2_t test_vamomaxei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m
   return vamomaxei16_v_i64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei16_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -290,7 +264,6 @@ vint64m4_t test_vamomaxei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4
   return vamomaxei16_v_i64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei16_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -301,7 +274,6 @@ vint64m8_t test_vamomaxei16_v_i64m8 (int64_t *base, vuint16m2_t bindex, vint64m8
   return vamomaxei16_v_i64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei32_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -312,7 +284,6 @@ vint64m1_t test_vamomaxei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m
   return vamomaxei32_v_i64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei32_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -323,7 +294,6 @@ vint64m2_t test_vamomaxei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2
   return vamomaxei32_v_i64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei32_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -334,7 +304,6 @@ vint64m4_t test_vamomaxei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4
   return vamomaxei32_v_i64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei32_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -345,7 +314,6 @@ vint64m8_t test_vamomaxei32_v_i64m8 (int64_t *base, vuint32m4_t bindex, vint64m8
   return vamomaxei32_v_i64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei64_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -356,7 +324,6 @@ vint64m1_t test_vamomaxei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1
   return vamomaxei64_v_i64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei64_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -367,7 +334,6 @@ vint64m2_t test_vamomaxei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2
   return vamomaxei64_v_i64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei64_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -378,7 +344,6 @@ vint64m4_t test_vamomaxei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4
   return vamomaxei64_v_i64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei64_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -389,7 +354,6 @@ vint64m8_t test_vamomaxei64_v_i64m8 (int64_t *base, vuint64m8_t bindex, vint64m8
   return vamomaxei64_v_i64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -400,7 +364,6 @@ vuint32mf2_t test_vamomaxuei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuin
   return vamomaxuei8_v_u32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -411,7 +374,6 @@ vuint32m1_t test_vamomaxuei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint3
   return vamomaxuei8_v_u32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -422,7 +384,6 @@ vuint32m2_t test_vamomaxuei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint3
   return vamomaxuei8_v_u32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -433,7 +394,6 @@ vuint32m4_t test_vamomaxuei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32
   return vamomaxuei8_v_u32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -444,7 +404,6 @@ vuint32m8_t test_vamomaxuei8_v_u32m8 (uint32_t *base, vuint8m2_t bindex, vuint32
   return vamomaxuei8_v_u32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -455,7 +414,6 @@ vuint32mf2_t test_vamomaxuei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vu
   return vamomaxuei16_v_u32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -466,7 +424,6 @@ vuint32m1_t test_vamomaxuei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuin
   return vamomaxuei16_v_u32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -477,7 +434,6 @@ vuint32m2_t test_vamomaxuei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint
   return vamomaxuei16_v_u32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -488,7 +444,6 @@ vuint32m4_t test_vamomaxuei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint
   return vamomaxuei16_v_u32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -499,7 +454,6 @@ vuint32m8_t test_vamomaxuei16_v_u32m8 (uint32_t *base, vuint16m4_t bindex, vuint
   return vamomaxuei16_v_u32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -510,7 +464,6 @@ vuint32mf2_t test_vamomaxuei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vu
   return vamomaxuei32_v_u32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -521,7 +474,6 @@ vuint32m1_t test_vamomaxuei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint
   return vamomaxuei32_v_u32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -532,7 +484,6 @@ vuint32m2_t test_vamomaxuei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint
   return vamomaxuei32_v_u32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -543,7 +494,6 @@ vuint32m4_t test_vamomaxuei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint
   return vamomaxuei32_v_u32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -554,7 +504,6 @@ vuint32m8_t test_vamomaxuei32_v_u32m8 (uint32_t *base, vuint32m8_t bindex, vuint
   return vamomaxuei32_v_u32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei64_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -565,7 +514,6 @@ vuint32mf2_t test_vamomaxuei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vui
   return vamomaxuei64_v_u32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei64_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -576,7 +524,6 @@ vuint32m1_t test_vamomaxuei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint
   return vamomaxuei64_v_u32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei64_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -587,7 +534,6 @@ vuint32m2_t test_vamomaxuei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint
   return vamomaxuei64_v_u32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei64_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -598,7 +544,6 @@ vuint32m4_t test_vamomaxuei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint
   return vamomaxuei64_v_u32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei8_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -609,7 +554,6 @@ vuint64m1_t test_vamomaxuei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint6
   return vamomaxuei8_v_u64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei8_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -620,7 +564,6 @@ vuint64m2_t test_vamomaxuei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint6
   return vamomaxuei8_v_u64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei8_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -631,7 +574,6 @@ vuint64m4_t test_vamomaxuei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint6
   return vamomaxuei8_v_u64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei8_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -642,7 +584,6 @@ vuint64m8_t test_vamomaxuei8_v_u64m8 (uint64_t *base, vuint8m1_t bindex, vuint64
   return vamomaxuei8_v_u64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei16_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -653,7 +594,6 @@ vuint64m1_t test_vamomaxuei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuin
   return vamomaxuei16_v_u64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei16_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -664,7 +604,6 @@ vuint64m2_t test_vamomaxuei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuin
   return vamomaxuei16_v_u64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei16_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -675,7 +614,6 @@ vuint64m4_t test_vamomaxuei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint
   return vamomaxuei16_v_u64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei16_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -686,7 +624,6 @@ vuint64m8_t test_vamomaxuei16_v_u64m8 (uint64_t *base, vuint16m2_t bindex, vuint
   return vamomaxuei16_v_u64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei32_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -697,7 +634,6 @@ vuint64m1_t test_vamomaxuei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuin
   return vamomaxuei32_v_u64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei32_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -708,7 +644,6 @@ vuint64m2_t test_vamomaxuei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint
   return vamomaxuei32_v_u64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei32_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -719,7 +654,6 @@ vuint64m4_t test_vamomaxuei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint
   return vamomaxuei32_v_u64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei32_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -730,7 +664,6 @@ vuint64m8_t test_vamomaxuei32_v_u64m8 (uint64_t *base, vuint32m4_t bindex, vuint
   return vamomaxuei32_v_u64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei64_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -741,7 +674,6 @@ vuint64m1_t test_vamomaxuei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint
   return vamomaxuei64_v_u64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei64_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -752,7 +684,6 @@ vuint64m2_t test_vamomaxuei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint
   return vamomaxuei64_v_u64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei64_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -763,7 +694,6 @@ vuint64m4_t test_vamomaxuei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint
   return vamomaxuei64_v_u64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei64_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -774,7 +704,6 @@ vuint64m8_t test_vamomaxuei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint
   return vamomaxuei64_v_u64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei8_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -785,7 +714,6 @@ vint32mf2_t test_vamomaxei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8
   return vamomaxei8_v_i32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei8_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -796,7 +724,6 @@ vint32m1_t test_vamomaxei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t
   return vamomaxei8_v_i32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei8_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -807,7 +734,6 @@ vint32m2_t test_vamomaxei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t
   return vamomaxei8_v_i32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei8_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -818,7 +744,6 @@ vint32m4_t test_vamomaxei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t b
   return vamomaxei8_v_i32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei8_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -829,7 +754,6 @@ vint32m8_t test_vamomaxei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t b
   return vamomaxei8_v_i32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei16_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -840,7 +764,6 @@ vint32mf2_t test_vamomaxei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16m
   return vamomaxei16_v_i32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei16_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -851,7 +774,6 @@ vint32m1_t test_vamomaxei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2
   return vamomaxei16_v_i32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei16_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -862,7 +784,6 @@ vint32m2_t test_vamomaxei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_
   return vamomaxei16_v_i32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei16_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -873,7 +794,6 @@ vint32m4_t test_vamomaxei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t
   return vamomaxei16_v_i32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei16_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -884,7 +804,6 @@ vint32m8_t test_vamomaxei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t
   return vamomaxei16_v_i32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei32_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -895,7 +814,6 @@ vint32mf2_t test_vamomaxei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32m
   return vamomaxei32_v_i32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei32_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -906,7 +824,6 @@ vint32m1_t test_vamomaxei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_
   return vamomaxei32_v_i32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei32_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -917,7 +834,6 @@ vint32m2_t test_vamomaxei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_
   return vamomaxei32_v_i32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei32_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -928,7 +844,6 @@ vint32m4_t test_vamomaxei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t
   return vamomaxei32_v_i32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei32_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -939,7 +854,6 @@ vint32m8_t test_vamomaxei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t
   return vamomaxei32_v_i32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei64_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -950,7 +864,6 @@ vint32mf2_t test_vamomaxei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m
   return vamomaxei64_v_i32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei64_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -961,7 +874,6 @@ vint32m1_t test_vamomaxei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_
   return vamomaxei64_v_i32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei64_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -972,7 +884,6 @@ vint32m2_t test_vamomaxei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_
   return vamomaxei64_v_i32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei64_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -983,7 +894,6 @@ vint32m4_t test_vamomaxei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t
   return vamomaxei64_v_i32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei8_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -994,7 +904,6 @@ vint64m1_t test_vamomaxei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t
   return vamomaxei8_v_i64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei8_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1005,7 +914,6 @@ vint64m2_t test_vamomaxei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t
   return vamomaxei8_v_i64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei8_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1016,7 +924,6 @@ vint64m4_t test_vamomaxei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t
   return vamomaxei8_v_i64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei8_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1027,7 +934,6 @@ vint64m8_t test_vamomaxei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t b
   return vamomaxei8_v_i64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei16_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1038,7 +944,6 @@ vint64m1_t test_vamomaxei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4
   return vamomaxei16_v_i64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei16_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1049,7 +954,6 @@ vint64m2_t test_vamomaxei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2
   return vamomaxei16_v_i64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei16_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1060,7 +964,6 @@ vint64m4_t test_vamomaxei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_
   return vamomaxei16_v_i64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei16_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1071,7 +974,6 @@ vint64m8_t test_vamomaxei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t
   return vamomaxei16_v_i64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei32_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1082,7 +984,6 @@ vint64m1_t test_vamomaxei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2
   return vamomaxei32_v_i64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei32_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1093,7 +994,6 @@ vint64m2_t test_vamomaxei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_
   return vamomaxei32_v_i64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei32_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1104,7 +1004,6 @@ vint64m4_t test_vamomaxei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_
   return vamomaxei32_v_i64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei32_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1115,7 +1014,6 @@ vint64m8_t test_vamomaxei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t
   return vamomaxei32_v_i64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei64_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1126,7 +1024,6 @@ vint64m1_t test_vamomaxei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_
   return vamomaxei64_v_i64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei64_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1137,7 +1034,6 @@ vint64m2_t test_vamomaxei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_
   return vamomaxei64_v_i64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei64_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1148,7 +1044,6 @@ vint64m4_t test_vamomaxei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_
   return vamomaxei64_v_i64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei64_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1159,7 +1054,6 @@ vint64m8_t test_vamomaxei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t
   return vamomaxei64_v_i64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1170,7 +1064,6 @@ vuint32mf2_t test_vamomaxuei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8
   return vamomaxuei8_v_u32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1181,7 +1074,6 @@ vuint32m1_t test_vamomaxuei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf
   return vamomaxuei8_v_u32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1192,7 +1084,6 @@ vuint32m2_t test_vamomaxuei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf
   return vamomaxuei8_v_u32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1203,7 +1094,6 @@ vuint32m4_t test_vamomaxuei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_
   return vamomaxuei8_v_u32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1214,7 +1104,6 @@ vuint32m8_t test_vamomaxuei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_
   return vamomaxuei8_v_u32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1225,7 +1114,6 @@ vuint32mf2_t test_vamomaxuei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint
   return vamomaxuei16_v_u32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1236,7 +1124,6 @@ vuint32m1_t test_vamomaxuei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16
   return vamomaxuei16_v_u32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1247,7 +1134,6 @@ vuint32m2_t test_vamomaxuei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16
   return vamomaxuei16_v_u32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1258,7 +1144,6 @@ vuint32m4_t test_vamomaxuei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m
   return vamomaxuei16_v_u32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1269,7 +1154,6 @@ vuint32m8_t test_vamomaxuei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m
   return vamomaxuei16_v_u32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1280,7 +1164,6 @@ vuint32mf2_t test_vamomaxuei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint
   return vamomaxuei32_v_u32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1291,7 +1174,6 @@ vuint32m1_t test_vamomaxuei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32
   return vamomaxuei32_v_u32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1302,7 +1184,6 @@ vuint32m2_t test_vamomaxuei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32
   return vamomaxuei32_v_u32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1313,7 +1194,6 @@ vuint32m4_t test_vamomaxuei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m
   return vamomaxuei32_v_u32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1324,7 +1204,6 @@ vuint32m8_t test_vamomaxuei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m
   return vamomaxuei32_v_u32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei64_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1335,7 +1214,6 @@ vuint32mf2_t test_vamomaxuei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint
   return vamomaxuei64_v_u32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei64_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1346,7 +1224,6 @@ vuint32m1_t test_vamomaxuei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64
   return vamomaxuei64_v_u32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei64_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1357,7 +1234,6 @@ vuint32m2_t test_vamomaxuei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64
   return vamomaxuei64_v_u32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei64_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1368,7 +1244,6 @@ vuint32m4_t test_vamomaxuei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m
   return vamomaxuei64_v_u32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei8_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1379,7 +1254,6 @@ vuint64m1_t test_vamomaxuei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf
   return vamomaxuei8_v_u64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei8_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1390,7 +1264,6 @@ vuint64m2_t test_vamomaxuei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf
   return vamomaxuei8_v_u64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei8_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1401,7 +1274,6 @@ vuint64m4_t test_vamomaxuei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf
   return vamomaxuei8_v_u64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei8_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1412,7 +1284,6 @@ vuint64m8_t test_vamomaxuei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_
   return vamomaxuei8_v_u64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei16_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1423,7 +1294,6 @@ vuint64m1_t test_vamomaxuei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16
   return vamomaxuei16_v_u64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei16_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1434,7 +1304,6 @@ vuint64m2_t test_vamomaxuei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16
   return vamomaxuei16_v_u64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei16_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1445,7 +1314,6 @@ vuint64m4_t test_vamomaxuei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16
   return vamomaxuei16_v_u64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei16_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1456,7 +1324,6 @@ vuint64m8_t test_vamomaxuei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m
   return vamomaxuei16_v_u64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei32_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1467,7 +1334,6 @@ vuint64m1_t test_vamomaxuei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32
   return vamomaxuei32_v_u64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei32_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1478,7 +1344,6 @@ vuint64m2_t test_vamomaxuei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32
   return vamomaxuei32_v_u64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei32_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1489,7 +1354,6 @@ vuint64m4_t test_vamomaxuei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32
   return vamomaxuei32_v_u64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei32_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1500,7 +1364,6 @@ vuint64m8_t test_vamomaxuei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m
   return vamomaxuei32_v_u64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei64_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1511,7 +1374,6 @@ vuint64m1_t test_vamomaxuei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64
   return vamomaxuei64_v_u64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei64_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1522,7 +1384,6 @@ vuint64m2_t test_vamomaxuei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64
   return vamomaxuei64_v_u64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei64_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1533,7 +1394,6 @@ vuint64m4_t test_vamomaxuei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64
   return vamomaxuei64_v_u64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei64_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vamomin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vamomin.c
index a60d8fb3c2fbb..b197cd89be8a0 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vamomin.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vamomin.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vamominei8_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -15,7 +14,6 @@ vint32mf2_t test_vamominei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32m
   return vamominei8_v_i32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei8_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -26,7 +24,6 @@ vint32m1_t test_vamominei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_
   return vamominei8_v_i32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei8_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -37,7 +34,6 @@ vint32m2_t test_vamominei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_
   return vamominei8_v_i32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei8_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -48,7 +44,6 @@ vint32m4_t test_vamominei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t
   return vamominei8_v_i32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei8_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -59,7 +54,6 @@ vint32m8_t test_vamominei8_v_i32m8 (int32_t *base, vuint8m2_t bindex, vint32m8_t
   return vamominei8_v_i32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei16_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -70,7 +64,6 @@ vint32mf2_t test_vamominei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint3
   return vamominei16_v_i32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei16_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -81,7 +74,6 @@ vint32m1_t test_vamominei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m
   return vamominei16_v_i32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei16_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -92,7 +84,6 @@ vint32m2_t test_vamominei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2
   return vamominei16_v_i32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei16_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -103,7 +94,6 @@ vint32m4_t test_vamominei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4
   return vamominei16_v_i32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei16_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -114,7 +104,6 @@ vint32m8_t test_vamominei16_v_i32m8 (int32_t *base, vuint16m4_t bindex, vint32m8
   return vamominei16_v_i32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei32_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -125,7 +114,6 @@ vint32mf2_t test_vamominei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint3
   return vamominei32_v_i32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei32_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -136,7 +124,6 @@ vint32m1_t test_vamominei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1
   return vamominei32_v_i32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei32_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -147,7 +134,6 @@ vint32m2_t test_vamominei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2
   return vamominei32_v_i32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei32_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -158,7 +144,6 @@ vint32m4_t test_vamominei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4
   return vamominei32_v_i32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei32_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -169,7 +154,6 @@ vint32m8_t test_vamominei32_v_i32m8 (int32_t *base, vuint32m8_t bindex, vint32m8
   return vamominei32_v_i32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei64_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -180,7 +164,6 @@ vint32mf2_t test_vamominei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32
   return vamominei64_v_i32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei64_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -191,7 +174,6 @@ vint32m1_t test_vamominei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1
   return vamominei64_v_i32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei64_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -202,7 +184,6 @@ vint32m2_t test_vamominei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2
   return vamominei64_v_i32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei64_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -213,7 +194,6 @@ vint32m4_t test_vamominei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4
   return vamominei64_v_i32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei8_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -224,7 +204,6 @@ vint64m1_t test_vamominei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_
   return vamominei8_v_i64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei8_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -235,7 +214,6 @@ vint64m2_t test_vamominei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_
   return vamominei8_v_i64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei8_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -246,7 +224,6 @@ vint64m4_t test_vamominei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_
   return vamominei8_v_i64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei8_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -257,7 +234,6 @@ vint64m8_t test_vamominei8_v_i64m8 (int64_t *base, vuint8m1_t bindex, vint64m8_t
   return vamominei8_v_i64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei16_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -268,7 +244,6 @@ vint64m1_t test_vamominei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m
   return vamominei16_v_i64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei16_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -279,7 +254,6 @@ vint64m2_t test_vamominei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m
   return vamominei16_v_i64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei16_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -290,7 +264,6 @@ vint64m4_t test_vamominei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4
   return vamominei16_v_i64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei16_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -301,7 +274,6 @@ vint64m8_t test_vamominei16_v_i64m8 (int64_t *base, vuint16m2_t bindex, vint64m8
   return vamominei16_v_i64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei32_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -312,7 +284,6 @@ vint64m1_t test_vamominei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m
   return vamominei32_v_i64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei32_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -323,7 +294,6 @@ vint64m2_t test_vamominei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2
   return vamominei32_v_i64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei32_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -334,7 +304,6 @@ vint64m4_t test_vamominei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4
   return vamominei32_v_i64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei32_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -345,7 +314,6 @@ vint64m8_t test_vamominei32_v_i64m8 (int64_t *base, vuint32m4_t bindex, vint64m8
   return vamominei32_v_i64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei64_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -356,7 +324,6 @@ vint64m1_t test_vamominei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1
   return vamominei64_v_i64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei64_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -367,7 +334,6 @@ vint64m2_t test_vamominei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2
   return vamominei64_v_i64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei64_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -378,7 +344,6 @@ vint64m4_t test_vamominei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4
   return vamominei64_v_i64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei64_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -389,7 +354,6 @@ vint64m8_t test_vamominei64_v_i64m8 (int64_t *base, vuint64m8_t bindex, vint64m8
   return vamominei64_v_i64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei8_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -400,7 +364,6 @@ vuint32mf2_t test_vamominuei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuin
   return vamominuei8_v_u32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei8_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -411,7 +374,6 @@ vuint32m1_t test_vamominuei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint3
   return vamominuei8_v_u32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei8_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -422,7 +384,6 @@ vuint32m2_t test_vamominuei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint3
   return vamominuei8_v_u32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei8_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -433,7 +394,6 @@ vuint32m4_t test_vamominuei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32
   return vamominuei8_v_u32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei8_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -444,7 +404,6 @@ vuint32m8_t test_vamominuei8_v_u32m8 (uint32_t *base, vuint8m2_t bindex, vuint32
   return vamominuei8_v_u32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei16_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -455,7 +414,6 @@ vuint32mf2_t test_vamominuei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vu
   return vamominuei16_v_u32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei16_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -466,7 +424,6 @@ vuint32m1_t test_vamominuei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuin
   return vamominuei16_v_u32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei16_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -477,7 +434,6 @@ vuint32m2_t test_vamominuei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint
   return vamominuei16_v_u32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei16_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -488,7 +444,6 @@ vuint32m4_t test_vamominuei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint
   return vamominuei16_v_u32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei16_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -499,7 +454,6 @@ vuint32m8_t test_vamominuei16_v_u32m8 (uint32_t *base, vuint16m4_t bindex, vuint
   return vamominuei16_v_u32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei32_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -510,7 +464,6 @@ vuint32mf2_t test_vamominuei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vu
   return vamominuei32_v_u32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei32_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -521,7 +474,6 @@ vuint32m1_t test_vamominuei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint
   return vamominuei32_v_u32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei32_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -532,7 +484,6 @@ vuint32m2_t test_vamominuei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint
   return vamominuei32_v_u32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei32_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -543,7 +494,6 @@ vuint32m4_t test_vamominuei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint
   return vamominuei32_v_u32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei32_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -554,7 +504,6 @@ vuint32m8_t test_vamominuei32_v_u32m8 (uint32_t *base, vuint32m8_t bindex, vuint
   return vamominuei32_v_u32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei64_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -565,7 +514,6 @@ vuint32mf2_t test_vamominuei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vui
   return vamominuei64_v_u32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei64_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -576,7 +524,6 @@ vuint32m1_t test_vamominuei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint
   return vamominuei64_v_u32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei64_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -587,7 +534,6 @@ vuint32m2_t test_vamominuei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint
   return vamominuei64_v_u32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei64_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -598,7 +544,6 @@ vuint32m4_t test_vamominuei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint
   return vamominuei64_v_u32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei8_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -609,7 +554,6 @@ vuint64m1_t test_vamominuei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint6
   return vamominuei8_v_u64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei8_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -620,7 +564,6 @@ vuint64m2_t test_vamominuei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint6
   return vamominuei8_v_u64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei8_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -631,7 +574,6 @@ vuint64m4_t test_vamominuei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint6
   return vamominuei8_v_u64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei8_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -642,7 +584,6 @@ vuint64m8_t test_vamominuei8_v_u64m8 (uint64_t *base, vuint8m1_t bindex, vuint64
   return vamominuei8_v_u64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei16_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -653,7 +594,6 @@ vuint64m1_t test_vamominuei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuin
   return vamominuei16_v_u64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei16_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -664,7 +604,6 @@ vuint64m2_t test_vamominuei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuin
   return vamominuei16_v_u64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei16_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -675,7 +614,6 @@ vuint64m4_t test_vamominuei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint
   return vamominuei16_v_u64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei16_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -686,7 +624,6 @@ vuint64m8_t test_vamominuei16_v_u64m8 (uint64_t *base, vuint16m2_t bindex, vuint
   return vamominuei16_v_u64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei32_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -697,7 +634,6 @@ vuint64m1_t test_vamominuei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuin
   return vamominuei32_v_u64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei32_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -708,7 +644,6 @@ vuint64m2_t test_vamominuei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint
   return vamominuei32_v_u64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei32_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -719,7 +654,6 @@ vuint64m4_t test_vamominuei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint
   return vamominuei32_v_u64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei32_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -730,7 +664,6 @@ vuint64m8_t test_vamominuei32_v_u64m8 (uint64_t *base, vuint32m4_t bindex, vuint
   return vamominuei32_v_u64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei64_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -741,7 +674,6 @@ vuint64m1_t test_vamominuei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint
   return vamominuei64_v_u64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei64_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -752,7 +684,6 @@ vuint64m2_t test_vamominuei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint
   return vamominuei64_v_u64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei64_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -763,7 +694,6 @@ vuint64m4_t test_vamominuei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint
   return vamominuei64_v_u64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei64_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -774,7 +704,6 @@ vuint64m8_t test_vamominuei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint
   return vamominuei64_v_u64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei8_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -785,7 +714,6 @@ vint32mf2_t test_vamominei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8
   return vamominei8_v_i32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei8_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -796,7 +724,6 @@ vint32m1_t test_vamominei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t
   return vamominei8_v_i32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei8_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -807,7 +734,6 @@ vint32m2_t test_vamominei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t
   return vamominei8_v_i32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei8_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -818,7 +744,6 @@ vint32m4_t test_vamominei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t b
   return vamominei8_v_i32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei8_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -829,7 +754,6 @@ vint32m8_t test_vamominei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t b
   return vamominei8_v_i32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei16_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -840,7 +764,6 @@ vint32mf2_t test_vamominei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16m
   return vamominei16_v_i32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei16_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -851,7 +774,6 @@ vint32m1_t test_vamominei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2
   return vamominei16_v_i32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei16_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -862,7 +784,6 @@ vint32m2_t test_vamominei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_
   return vamominei16_v_i32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei16_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -873,7 +794,6 @@ vint32m4_t test_vamominei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t
   return vamominei16_v_i32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei16_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -884,7 +804,6 @@ vint32m8_t test_vamominei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t
   return vamominei16_v_i32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei32_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -895,7 +814,6 @@ vint32mf2_t test_vamominei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32m
   return vamominei32_v_i32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei32_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -906,7 +824,6 @@ vint32m1_t test_vamominei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_
   return vamominei32_v_i32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei32_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -917,7 +834,6 @@ vint32m2_t test_vamominei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_
   return vamominei32_v_i32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei32_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -928,7 +844,6 @@ vint32m4_t test_vamominei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t
   return vamominei32_v_i32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei32_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -939,7 +854,6 @@ vint32m8_t test_vamominei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t
   return vamominei32_v_i32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei64_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -950,7 +864,6 @@ vint32mf2_t test_vamominei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m
   return vamominei64_v_i32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei64_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -961,7 +874,6 @@ vint32m1_t test_vamominei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_
   return vamominei64_v_i32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei64_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -972,7 +884,6 @@ vint32m2_t test_vamominei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_
   return vamominei64_v_i32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei64_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -983,7 +894,6 @@ vint32m4_t test_vamominei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t
   return vamominei64_v_i32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei8_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -994,7 +904,6 @@ vint64m1_t test_vamominei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t
   return vamominei8_v_i64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei8_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1005,7 +914,6 @@ vint64m2_t test_vamominei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t
   return vamominei8_v_i64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei8_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1016,7 +924,6 @@ vint64m4_t test_vamominei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t
   return vamominei8_v_i64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei8_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1027,7 +934,6 @@ vint64m8_t test_vamominei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t b
   return vamominei8_v_i64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei16_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1038,7 +944,6 @@ vint64m1_t test_vamominei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4
   return vamominei16_v_i64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei16_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1049,7 +954,6 @@ vint64m2_t test_vamominei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2
   return vamominei16_v_i64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei16_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1060,7 +964,6 @@ vint64m4_t test_vamominei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_
   return vamominei16_v_i64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei16_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1071,7 +974,6 @@ vint64m8_t test_vamominei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t
   return vamominei16_v_i64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei32_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1082,7 +984,6 @@ vint64m1_t test_vamominei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2
   return vamominei32_v_i64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei32_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1093,7 +994,6 @@ vint64m2_t test_vamominei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_
   return vamominei32_v_i64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei32_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1104,7 +1004,6 @@ vint64m4_t test_vamominei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_
   return vamominei32_v_i64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei32_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1115,7 +1014,6 @@ vint64m8_t test_vamominei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t
   return vamominei32_v_i64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei64_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1126,7 +1024,6 @@ vint64m1_t test_vamominei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_
   return vamominei64_v_i64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei64_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1137,7 +1034,6 @@ vint64m2_t test_vamominei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_
   return vamominei64_v_i64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei64_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1148,7 +1044,6 @@ vint64m4_t test_vamominei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_
   return vamominei64_v_i64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei64_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1159,7 +1054,6 @@ vint64m8_t test_vamominei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t
   return vamominei64_v_i64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei8_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1170,7 +1064,6 @@ vuint32mf2_t test_vamominuei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8
   return vamominuei8_v_u32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei8_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1181,7 +1074,6 @@ vuint32m1_t test_vamominuei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf
   return vamominuei8_v_u32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei8_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1192,7 +1084,6 @@ vuint32m2_t test_vamominuei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf
   return vamominuei8_v_u32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei8_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1203,7 +1094,6 @@ vuint32m4_t test_vamominuei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_
   return vamominuei8_v_u32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei8_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1214,7 +1104,6 @@ vuint32m8_t test_vamominuei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_
   return vamominuei8_v_u32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei16_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1225,7 +1114,6 @@ vuint32mf2_t test_vamominuei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint
   return vamominuei16_v_u32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei16_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1236,7 +1124,6 @@ vuint32m1_t test_vamominuei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16
   return vamominuei16_v_u32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei16_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1247,7 +1134,6 @@ vuint32m2_t test_vamominuei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16
   return vamominuei16_v_u32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei16_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1258,7 +1144,6 @@ vuint32m4_t test_vamominuei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m
   return vamominuei16_v_u32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei16_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1269,7 +1154,6 @@ vuint32m8_t test_vamominuei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m
   return vamominuei16_v_u32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei32_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1280,7 +1164,6 @@ vuint32mf2_t test_vamominuei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint
   return vamominuei32_v_u32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei32_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1291,7 +1174,6 @@ vuint32m1_t test_vamominuei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32
   return vamominuei32_v_u32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei32_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1302,7 +1184,6 @@ vuint32m2_t test_vamominuei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32
   return vamominuei32_v_u32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei32_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1313,7 +1194,6 @@ vuint32m4_t test_vamominuei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m
   return vamominuei32_v_u32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei32_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1324,7 +1204,6 @@ vuint32m8_t test_vamominuei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m
   return vamominuei32_v_u32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei64_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1335,7 +1214,6 @@ vuint32mf2_t test_vamominuei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint
   return vamominuei64_v_u32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei64_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1346,7 +1224,6 @@ vuint32m1_t test_vamominuei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64
   return vamominuei64_v_u32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei64_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1357,7 +1234,6 @@ vuint32m2_t test_vamominuei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64
   return vamominuei64_v_u32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei64_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1368,7 +1244,6 @@ vuint32m4_t test_vamominuei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m
   return vamominuei64_v_u32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei8_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1379,7 +1254,6 @@ vuint64m1_t test_vamominuei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf
   return vamominuei8_v_u64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei8_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1390,7 +1264,6 @@ vuint64m2_t test_vamominuei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf
   return vamominuei8_v_u64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei8_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1401,7 +1274,6 @@ vuint64m4_t test_vamominuei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf
   return vamominuei8_v_u64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei8_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1412,7 +1284,6 @@ vuint64m8_t test_vamominuei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_
   return vamominuei8_v_u64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei16_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1423,7 +1294,6 @@ vuint64m1_t test_vamominuei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16
   return vamominuei16_v_u64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei16_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1434,7 +1304,6 @@ vuint64m2_t test_vamominuei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16
   return vamominuei16_v_u64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei16_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1445,7 +1314,6 @@ vuint64m4_t test_vamominuei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16
   return vamominuei16_v_u64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei16_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1456,7 +1324,6 @@ vuint64m8_t test_vamominuei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m
   return vamominuei16_v_u64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei32_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1467,7 +1334,6 @@ vuint64m1_t test_vamominuei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32
   return vamominuei32_v_u64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei32_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1478,7 +1344,6 @@ vuint64m2_t test_vamominuei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32
   return vamominuei32_v_u64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei32_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1489,7 +1354,6 @@ vuint64m4_t test_vamominuei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32
   return vamominuei32_v_u64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei32_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1500,7 +1364,6 @@ vuint64m8_t test_vamominuei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m
   return vamominuei32_v_u64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei64_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1511,7 +1374,6 @@ vuint64m1_t test_vamominuei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64
   return vamominuei64_v_u64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei64_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1522,7 +1384,6 @@ vuint64m2_t test_vamominuei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64
   return vamominuei64_v_u64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei64_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1533,7 +1394,6 @@ vuint64m4_t test_vamominuei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64
   return vamominuei64_v_u64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei64_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vamoor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vamoor.c
index 40faeb2dfbab6..6ce8f94bcc07d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vamoor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vamoor.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -15,7 +14,6 @@ vint32mf2_t test_vamoorei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf
   return vamoorei8_v_i32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -26,7 +24,6 @@ vint32m1_t test_vamoorei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t
   return vamoorei8_v_i32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -37,7 +34,6 @@ vint32m2_t test_vamoorei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t
   return vamoorei8_v_i32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -48,7 +44,6 @@ vint32m4_t test_vamoorei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t
   return vamoorei8_v_i32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -59,7 +54,6 @@ vint32m8_t test_vamoorei8_v_i32m8 (int32_t *base, vuint8m2_t bindex, vint32m8_t
   return vamoorei8_v_i32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -70,7 +64,6 @@ vint32mf2_t test_vamoorei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32
   return vamoorei16_v_i32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -81,7 +74,6 @@ vint32m1_t test_vamoorei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1
   return vamoorei16_v_i32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -92,7 +84,6 @@ vint32m2_t test_vamoorei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_
   return vamoorei16_v_i32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -103,7 +94,6 @@ vint32m4_t test_vamoorei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4_
   return vamoorei16_v_i32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -114,7 +104,6 @@ vint32m8_t test_vamoorei16_v_i32m8 (int32_t *base, vuint16m4_t bindex, vint32m8_
   return vamoorei16_v_i32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -125,7 +114,6 @@ vint32mf2_t test_vamoorei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32
   return vamoorei32_v_i32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -136,7 +124,6 @@ vint32m1_t test_vamoorei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_
   return vamoorei32_v_i32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -147,7 +134,6 @@ vint32m2_t test_vamoorei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_
   return vamoorei32_v_i32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -158,7 +144,6 @@ vint32m4_t test_vamoorei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4_
   return vamoorei32_v_i32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -169,7 +154,6 @@ vint32m8_t test_vamoorei32_v_i32m8 (int32_t *base, vuint32m8_t bindex, vint32m8_
   return vamoorei32_v_i32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -180,7 +164,6 @@ vint32mf2_t test_vamoorei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32m
   return vamoorei64_v_i32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -191,7 +174,6 @@ vint32m1_t test_vamoorei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_
   return vamoorei64_v_i32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -202,7 +184,6 @@ vint32m2_t test_vamoorei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_
   return vamoorei64_v_i32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -213,7 +194,6 @@ vint32m4_t test_vamoorei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4_
   return vamoorei64_v_i32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -224,7 +204,6 @@ vint64m1_t test_vamoorei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t
   return vamoorei8_v_i64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -235,7 +214,6 @@ vint64m2_t test_vamoorei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t
   return vamoorei8_v_i64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -246,7 +224,6 @@ vint64m4_t test_vamoorei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_t
   return vamoorei8_v_i64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -257,7 +234,6 @@ vint64m8_t test_vamoorei8_v_i64m8 (int64_t *base, vuint8m1_t bindex, vint64m8_t
   return vamoorei8_v_i64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -268,7 +244,6 @@ vint64m1_t test_vamoorei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1
   return vamoorei16_v_i64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -279,7 +254,6 @@ vint64m2_t test_vamoorei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2
   return vamoorei16_v_i64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -290,7 +264,6 @@ vint64m4_t test_vamoorei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4_
   return vamoorei16_v_i64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -301,7 +274,6 @@ vint64m8_t test_vamoorei16_v_i64m8 (int64_t *base, vuint16m2_t bindex, vint64m8_
   return vamoorei16_v_i64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -312,7 +284,6 @@ vint64m1_t test_vamoorei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1
   return vamoorei32_v_i64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -323,7 +294,6 @@ vint64m2_t test_vamoorei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_
   return vamoorei32_v_i64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -334,7 +304,6 @@ vint64m4_t test_vamoorei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4_
   return vamoorei32_v_i64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -345,7 +314,6 @@ vint64m8_t test_vamoorei32_v_i64m8 (int64_t *base, vuint32m4_t bindex, vint64m8_
   return vamoorei32_v_i64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -356,7 +324,6 @@ vint64m1_t test_vamoorei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_
   return vamoorei64_v_i64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -367,7 +334,6 @@ vint64m2_t test_vamoorei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_
   return vamoorei64_v_i64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -378,7 +344,6 @@ vint64m4_t test_vamoorei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4_
   return vamoorei64_v_i64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -389,7 +354,6 @@ vint64m8_t test_vamoorei64_v_i64m8 (int64_t *base, vuint64m8_t bindex, vint64m8_
   return vamoorei64_v_i64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -400,7 +364,6 @@ vuint32mf2_t test_vamoorei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint3
   return vamoorei8_v_u32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -411,7 +374,6 @@ vuint32m1_t test_vamoorei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m
   return vamoorei8_v_u32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -422,7 +384,6 @@ vuint32m2_t test_vamoorei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m
   return vamoorei8_v_u32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -433,7 +394,6 @@ vuint32m4_t test_vamoorei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32m4
   return vamoorei8_v_u32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -444,7 +404,6 @@ vuint32m8_t test_vamoorei8_v_u32m8 (uint32_t *base, vuint8m2_t bindex, vuint32m8
   return vamoorei8_v_u32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -455,7 +414,6 @@ vuint32mf2_t test_vamoorei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuin
   return vamoorei16_v_u32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -466,7 +424,6 @@ vuint32m1_t test_vamoorei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint3
   return vamoorei16_v_u32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -477,7 +434,6 @@ vuint32m2_t test_vamoorei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32
   return vamoorei16_v_u32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -488,7 +444,6 @@ vuint32m4_t test_vamoorei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint32
   return vamoorei16_v_u32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -499,7 +454,6 @@ vuint32m8_t test_vamoorei16_v_u32m8 (uint32_t *base, vuint16m4_t bindex, vuint32
   return vamoorei16_v_u32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -510,7 +464,6 @@ vuint32mf2_t test_vamoorei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuin
   return vamoorei32_v_u32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -521,7 +474,6 @@ vuint32m1_t test_vamoorei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32
   return vamoorei32_v_u32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -532,7 +484,6 @@ vuint32m2_t test_vamoorei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32
   return vamoorei32_v_u32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -543,7 +494,6 @@ vuint32m4_t test_vamoorei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint32
   return vamoorei32_v_u32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -554,7 +504,6 @@ vuint32m8_t test_vamoorei32_v_u32m8 (uint32_t *base, vuint32m8_t bindex, vuint32
   return vamoorei32_v_u32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -565,7 +514,6 @@ vuint32mf2_t test_vamoorei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint
   return vamoorei64_v_u32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -576,7 +524,6 @@ vuint32m1_t test_vamoorei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32
   return vamoorei64_v_u32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -587,7 +534,6 @@ vuint32m2_t test_vamoorei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32
   return vamoorei64_v_u32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -598,7 +544,6 @@ vuint32m4_t test_vamoorei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint32
   return vamoorei64_v_u32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -609,7 +554,6 @@ vuint64m1_t test_vamoorei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m
   return vamoorei8_v_u64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -620,7 +564,6 @@ vuint64m2_t test_vamoorei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m
   return vamoorei8_v_u64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -631,7 +574,6 @@ vuint64m4_t test_vamoorei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint64m
   return vamoorei8_v_u64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -642,7 +584,6 @@ vuint64m8_t test_vamoorei8_v_u64m8 (uint64_t *base, vuint8m1_t bindex, vuint64m8
   return vamoorei8_v_u64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -653,7 +594,6 @@ vuint64m1_t test_vamoorei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint6
   return vamoorei16_v_u64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -664,7 +604,6 @@ vuint64m2_t test_vamoorei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint6
   return vamoorei16_v_u64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -675,7 +614,6 @@ vuint64m4_t test_vamoorei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint64
   return vamoorei16_v_u64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -686,7 +624,6 @@ vuint64m8_t test_vamoorei16_v_u64m8 (uint64_t *base, vuint16m2_t bindex, vuint64
   return vamoorei16_v_u64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -697,7 +634,6 @@ vuint64m1_t test_vamoorei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint6
   return vamoorei32_v_u64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -708,7 +644,6 @@ vuint64m2_t test_vamoorei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64
   return vamoorei32_v_u64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -719,7 +654,6 @@ vuint64m4_t test_vamoorei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint64
   return vamoorei32_v_u64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -730,7 +664,6 @@ vuint64m8_t test_vamoorei32_v_u64m8 (uint64_t *base, vuint32m4_t bindex, vuint64
   return vamoorei32_v_u64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -741,7 +674,6 @@ vuint64m1_t test_vamoorei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64
   return vamoorei64_v_u64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -752,7 +684,6 @@ vuint64m2_t test_vamoorei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64
   return vamoorei64_v_u64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -763,7 +694,6 @@ vuint64m4_t test_vamoorei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint64
   return vamoorei64_v_u64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -774,7 +704,6 @@ vuint64m8_t test_vamoorei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint64
   return vamoorei64_v_u64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -785,7 +714,6 @@ vint32mf2_t test_vamoorei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_
   return vamoorei8_v_i32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -796,7 +724,6 @@ vint32m1_t test_vamoorei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t
   return vamoorei8_v_i32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -807,7 +734,6 @@ vint32m2_t test_vamoorei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t
   return vamoorei8_v_i32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -818,7 +744,6 @@ vint32m4_t test_vamoorei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bi
   return vamoorei8_v_i32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -829,7 +754,6 @@ vint32m8_t test_vamoorei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t bi
   return vamoorei8_v_i32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -840,7 +764,6 @@ vint32mf2_t test_vamoorei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf
   return vamoorei16_v_i32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -851,7 +774,6 @@ vint32m1_t test_vamoorei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_
   return vamoorei16_v_i32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -862,7 +784,6 @@ vint32m2_t test_vamoorei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t
   return vamoorei16_v_i32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -873,7 +794,6 @@ vint32m4_t test_vamoorei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t
   return vamoorei16_v_i32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -884,7 +804,6 @@ vint32m8_t test_vamoorei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t
   return vamoorei16_v_i32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -895,7 +814,6 @@ vint32mf2_t test_vamoorei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf
   return vamoorei32_v_i32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -906,7 +824,6 @@ vint32m1_t test_vamoorei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t
   return vamoorei32_v_i32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -917,7 +834,6 @@ vint32m2_t test_vamoorei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t
   return vamoorei32_v_i32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -928,7 +844,6 @@ vint32m4_t test_vamoorei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t
   return vamoorei32_v_i32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -939,7 +854,6 @@ vint32m8_t test_vamoorei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t
   return vamoorei32_v_i32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -950,7 +864,6 @@ vint32mf2_t test_vamoorei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1
   return vamoorei64_v_i32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -961,7 +874,6 @@ vint32m1_t test_vamoorei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t
   return vamoorei64_v_i32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -972,7 +884,6 @@ vint32m2_t test_vamoorei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t
   return vamoorei64_v_i32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -983,7 +894,6 @@ vint32m4_t test_vamoorei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t
   return vamoorei64_v_i32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -994,7 +904,6 @@ vint64m1_t test_vamoorei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t
   return vamoorei8_v_i64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1005,7 +914,6 @@ vint64m2_t test_vamoorei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t
   return vamoorei8_v_i64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1016,7 +924,6 @@ vint64m4_t test_vamoorei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t
   return vamoorei8_v_i64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1027,7 +934,6 @@ vint64m8_t test_vamoorei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t bi
   return vamoorei8_v_i64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1038,7 +944,6 @@ vint64m1_t test_vamoorei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_
   return vamoorei16_v_i64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1049,7 +954,6 @@ vint64m2_t test_vamoorei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_
   return vamoorei16_v_i64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1060,7 +964,6 @@ vint64m4_t test_vamoorei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t
   return vamoorei16_v_i64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1071,7 +974,6 @@ vint64m8_t test_vamoorei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t
   return vamoorei16_v_i64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1082,7 +984,6 @@ vint64m1_t test_vamoorei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_
   return vamoorei32_v_i64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1093,7 +994,6 @@ vint64m2_t test_vamoorei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t
   return vamoorei32_v_i64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1104,7 +1004,6 @@ vint64m4_t test_vamoorei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t
   return vamoorei32_v_i64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1115,7 +1014,6 @@ vint64m8_t test_vamoorei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t
   return vamoorei32_v_i64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1126,7 +1024,6 @@ vint64m1_t test_vamoorei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t
   return vamoorei64_v_i64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1137,7 +1034,6 @@ vint64m2_t test_vamoorei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t
   return vamoorei64_v_i64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1148,7 +1044,6 @@ vint64m4_t test_vamoorei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t
   return vamoorei64_v_i64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1159,7 +1054,6 @@ vint64m8_t test_vamoorei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t
   return vamoorei64_v_i64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1170,7 +1064,6 @@ vuint32mf2_t test_vamoorei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf
   return vamoorei8_v_u32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1181,7 +1074,6 @@ vuint32m1_t test_vamoorei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_
   return vamoorei8_v_u32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1192,7 +1084,6 @@ vuint32m2_t test_vamoorei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_
   return vamoorei8_v_u32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1203,7 +1094,6 @@ vuint32m4_t test_vamoorei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t
   return vamoorei8_v_u32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1214,7 +1104,6 @@ vuint32m8_t test_vamoorei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t
   return vamoorei8_v_u32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1225,7 +1114,6 @@ vuint32mf2_t test_vamoorei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16
   return vamoorei16_v_u32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1236,7 +1124,6 @@ vuint32m1_t test_vamoorei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf
   return vamoorei16_v_u32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1247,7 +1134,6 @@ vuint32m2_t test_vamoorei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1
   return vamoorei16_v_u32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1258,7 +1144,6 @@ vuint32m4_t test_vamoorei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_
   return vamoorei16_v_u32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1269,7 +1154,6 @@ vuint32m8_t test_vamoorei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4_
   return vamoorei16_v_u32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1280,7 +1164,6 @@ vuint32mf2_t test_vamoorei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32
   return vamoorei32_v_u32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1291,7 +1174,6 @@ vuint32m1_t test_vamoorei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1
   return vamoorei32_v_u32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1302,7 +1184,6 @@ vuint32m2_t test_vamoorei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2
   return vamoorei32_v_u32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1313,7 +1194,6 @@ vuint32m4_t test_vamoorei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_
   return vamoorei32_v_u32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1324,7 +1204,6 @@ vuint32m8_t test_vamoorei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8_
   return vamoorei32_v_u32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1335,7 +1214,6 @@ vuint32mf2_t test_vamoorei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64
   return vamoorei64_v_u32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1346,7 +1224,6 @@ vuint32m1_t test_vamoorei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2
   return vamoorei64_v_u32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1357,7 +1234,6 @@ vuint32m2_t test_vamoorei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4
   return vamoorei64_v_u32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1368,7 +1244,6 @@ vuint32m4_t test_vamoorei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_
   return vamoorei64_v_u32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1379,7 +1254,6 @@ vuint64m1_t test_vamoorei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_
   return vamoorei8_v_u64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1390,7 +1264,6 @@ vuint64m2_t test_vamoorei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_
   return vamoorei8_v_u64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1401,7 +1274,6 @@ vuint64m4_t test_vamoorei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_
   return vamoorei8_v_u64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1412,7 +1284,6 @@ vuint64m8_t test_vamoorei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t
   return vamoorei8_v_u64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1423,7 +1294,6 @@ vuint64m1_t test_vamoorei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf
   return vamoorei16_v_u64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1434,7 +1304,6 @@ vuint64m2_t test_vamoorei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf
   return vamoorei16_v_u64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1445,7 +1314,6 @@ vuint64m4_t test_vamoorei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1
   return vamoorei16_v_u64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1456,7 +1324,6 @@ vuint64m8_t test_vamoorei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2_
   return vamoorei16_v_u64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1467,7 +1334,6 @@ vuint64m1_t test_vamoorei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf
   return vamoorei32_v_u64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1478,7 +1344,6 @@ vuint64m2_t test_vamoorei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1
   return vamoorei32_v_u64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1489,7 +1354,6 @@ vuint64m4_t test_vamoorei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2
   return vamoorei32_v_u64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1500,7 +1364,6 @@ vuint64m8_t test_vamoorei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4_
   return vamoorei32_v_u64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1511,7 +1374,6 @@ vuint64m1_t test_vamoorei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1
   return vamoorei64_v_u64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1522,7 +1384,6 @@ vuint64m2_t test_vamoorei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2
   return vamoorei64_v_u64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1533,7 +1394,6 @@ vuint64m4_t test_vamoorei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4
   return vamoorei64_v_u64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vamoswap.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vamoswap.c
index edacb2f6eb648..8c40d513c74c0 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vamoswap.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vamoswap.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -16,7 +15,6 @@ vint32mf2_t test_vamoswapei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32
   return vamoswapei8_v_i32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -27,7 +25,6 @@ vint32m1_t test_vamoswapei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1
   return vamoswapei8_v_i32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -38,7 +35,6 @@ vint32m2_t test_vamoswapei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2
   return vamoswapei8_v_i32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -49,7 +45,6 @@ vint32m4_t test_vamoswapei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_
   return vamoswapei8_v_i32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -60,7 +55,6 @@ vint32m8_t test_vamoswapei8_v_i32m8 (int32_t *base, vuint8m2_t bindex, vint32m8_
   return vamoswapei8_v_i32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -71,7 +65,6 @@ vint32mf2_t test_vamoswapei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint
   return vamoswapei16_v_i32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -82,7 +75,6 @@ vint32m1_t test_vamoswapei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32
   return vamoswapei16_v_i32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -93,7 +85,6 @@ vint32m2_t test_vamoswapei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m
   return vamoswapei16_v_i32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -104,7 +95,6 @@ vint32m4_t test_vamoswapei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m
   return vamoswapei16_v_i32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -115,7 +105,6 @@ vint32m8_t test_vamoswapei16_v_i32m8 (int32_t *base, vuint16m4_t bindex, vint32m
   return vamoswapei16_v_i32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -126,7 +115,6 @@ vint32mf2_t test_vamoswapei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint
   return vamoswapei32_v_i32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -137,7 +125,6 @@ vint32m1_t test_vamoswapei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m
   return vamoswapei32_v_i32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -148,7 +135,6 @@ vint32m2_t test_vamoswapei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m
   return vamoswapei32_v_i32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -159,7 +145,6 @@ vint32m4_t test_vamoswapei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m
   return vamoswapei32_v_i32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -170,7 +155,6 @@ vint32m8_t test_vamoswapei32_v_i32m8 (int32_t *base, vuint32m8_t bindex, vint32m
   return vamoswapei32_v_i32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -181,7 +165,6 @@ vint32mf2_t test_vamoswapei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint3
   return vamoswapei64_v_i32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -192,7 +175,6 @@ vint32m1_t test_vamoswapei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m
   return vamoswapei64_v_i32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -203,7 +185,6 @@ vint32m2_t test_vamoswapei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m
   return vamoswapei64_v_i32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -214,7 +195,6 @@ vint32m4_t test_vamoswapei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m
   return vamoswapei64_v_i32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -225,7 +205,6 @@ vint64m1_t test_vamoswapei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1
   return vamoswapei8_v_i64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -236,7 +215,6 @@ vint64m2_t test_vamoswapei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2
   return vamoswapei8_v_i64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -247,7 +225,6 @@ vint64m4_t test_vamoswapei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4
   return vamoswapei8_v_i64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -258,7 +235,6 @@ vint64m8_t test_vamoswapei8_v_i64m8 (int64_t *base, vuint8m1_t bindex, vint64m8_
   return vamoswapei8_v_i64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -269,7 +245,6 @@ vint64m1_t test_vamoswapei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64
   return vamoswapei16_v_i64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -280,7 +255,6 @@ vint64m2_t test_vamoswapei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64
   return vamoswapei16_v_i64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -291,7 +265,6 @@ vint64m4_t test_vamoswapei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m
   return vamoswapei16_v_i64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -302,7 +275,6 @@ vint64m8_t test_vamoswapei16_v_i64m8 (int64_t *base, vuint16m2_t bindex, vint64m
   return vamoswapei16_v_i64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -313,7 +285,6 @@ vint64m1_t test_vamoswapei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64
   return vamoswapei32_v_i64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -324,7 +295,6 @@ vint64m2_t test_vamoswapei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m
   return vamoswapei32_v_i64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -335,7 +305,6 @@ vint64m4_t test_vamoswapei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m
   return vamoswapei32_v_i64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -346,7 +315,6 @@ vint64m8_t test_vamoswapei32_v_i64m8 (int64_t *base, vuint32m4_t bindex, vint64m
   return vamoswapei32_v_i64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -357,7 +325,6 @@ vint64m1_t test_vamoswapei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m
   return vamoswapei64_v_i64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -368,7 +335,6 @@ vint64m2_t test_vamoswapei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m
   return vamoswapei64_v_i64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -379,7 +345,6 @@ vint64m4_t test_vamoswapei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m
   return vamoswapei64_v_i64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -390,7 +355,6 @@ vint64m8_t test_vamoswapei64_v_i64m8 (int64_t *base, vuint64m8_t bindex, vint64m
   return vamoswapei64_v_i64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -401,7 +365,6 @@ vuint32mf2_t test_vamoswapei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuin
   return vamoswapei8_v_u32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -412,7 +375,6 @@ vuint32m1_t test_vamoswapei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint3
   return vamoswapei8_v_u32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -423,7 +385,6 @@ vuint32m2_t test_vamoswapei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint3
   return vamoswapei8_v_u32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -434,7 +395,6 @@ vuint32m4_t test_vamoswapei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32
   return vamoswapei8_v_u32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -445,7 +405,6 @@ vuint32m8_t test_vamoswapei8_v_u32m8 (uint32_t *base, vuint8m2_t bindex, vuint32
   return vamoswapei8_v_u32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -456,7 +415,6 @@ vuint32mf2_t test_vamoswapei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vu
   return vamoswapei16_v_u32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -467,7 +425,6 @@ vuint32m1_t test_vamoswapei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuin
   return vamoswapei16_v_u32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -478,7 +435,6 @@ vuint32m2_t test_vamoswapei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint
   return vamoswapei16_v_u32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -489,7 +445,6 @@ vuint32m4_t test_vamoswapei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint
   return vamoswapei16_v_u32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -500,7 +455,6 @@ vuint32m8_t test_vamoswapei16_v_u32m8 (uint32_t *base, vuint16m4_t bindex, vuint
   return vamoswapei16_v_u32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -511,7 +465,6 @@ vuint32mf2_t test_vamoswapei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vu
   return vamoswapei32_v_u32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -522,7 +475,6 @@ vuint32m1_t test_vamoswapei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint
   return vamoswapei32_v_u32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -533,7 +485,6 @@ vuint32m2_t test_vamoswapei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint
   return vamoswapei32_v_u32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -544,7 +495,6 @@ vuint32m4_t test_vamoswapei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint
   return vamoswapei32_v_u32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -555,7 +505,6 @@ vuint32m8_t test_vamoswapei32_v_u32m8 (uint32_t *base, vuint32m8_t bindex, vuint
   return vamoswapei32_v_u32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -566,7 +515,6 @@ vuint32mf2_t test_vamoswapei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vui
   return vamoswapei64_v_u32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -577,7 +525,6 @@ vuint32m1_t test_vamoswapei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint
   return vamoswapei64_v_u32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -588,7 +535,6 @@ vuint32m2_t test_vamoswapei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint
   return vamoswapei64_v_u32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -599,7 +545,6 @@ vuint32m4_t test_vamoswapei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint
   return vamoswapei64_v_u32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -610,7 +555,6 @@ vuint64m1_t test_vamoswapei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint6
   return vamoswapei8_v_u64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -621,7 +565,6 @@ vuint64m2_t test_vamoswapei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint6
   return vamoswapei8_v_u64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -632,7 +575,6 @@ vuint64m4_t test_vamoswapei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint6
   return vamoswapei8_v_u64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -643,7 +585,6 @@ vuint64m8_t test_vamoswapei8_v_u64m8 (uint64_t *base, vuint8m1_t bindex, vuint64
   return vamoswapei8_v_u64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -654,7 +595,6 @@ vuint64m1_t test_vamoswapei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuin
   return vamoswapei16_v_u64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -665,7 +605,6 @@ vuint64m2_t test_vamoswapei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuin
   return vamoswapei16_v_u64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -676,7 +615,6 @@ vuint64m4_t test_vamoswapei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint
   return vamoswapei16_v_u64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -687,7 +625,6 @@ vuint64m8_t test_vamoswapei16_v_u64m8 (uint64_t *base, vuint16m2_t bindex, vuint
   return vamoswapei16_v_u64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -698,7 +635,6 @@ vuint64m1_t test_vamoswapei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuin
   return vamoswapei32_v_u64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -709,7 +645,6 @@ vuint64m2_t test_vamoswapei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint
   return vamoswapei32_v_u64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -720,7 +655,6 @@ vuint64m4_t test_vamoswapei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint
   return vamoswapei32_v_u64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -731,7 +665,6 @@ vuint64m8_t test_vamoswapei32_v_u64m8 (uint64_t *base, vuint32m4_t bindex, vuint
   return vamoswapei32_v_u64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -742,7 +675,6 @@ vuint64m1_t test_vamoswapei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint
   return vamoswapei64_v_u64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -753,7 +685,6 @@ vuint64m2_t test_vamoswapei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint
   return vamoswapei64_v_u64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -764,7 +695,6 @@ vuint64m4_t test_vamoswapei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint
   return vamoswapei64_v_u64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -775,7 +705,6 @@ vuint64m8_t test_vamoswapei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint
   return vamoswapei64_v_u64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -786,7 +715,6 @@ vfloat32mf2_t test_vamoswapei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat
   return vamoswapei8_v_f32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -797,7 +725,6 @@ vfloat32m1_t test_vamoswapei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32
   return vamoswapei8_v_f32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -808,7 +735,6 @@ vfloat32m2_t test_vamoswapei8_v_f32m2 (float *base, vuint8mf2_t bindex, vfloat32
   return vamoswapei8_v_f32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -819,7 +745,6 @@ vfloat32m4_t test_vamoswapei8_v_f32m4 (float *base, vuint8m1_t bindex, vfloat32m
   return vamoswapei8_v_f32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
@@ -830,7 +755,6 @@ vfloat32m8_t test_vamoswapei8_v_f32m8 (float *base, vuint8m2_t bindex, vfloat32m
   return vamoswapei8_v_f32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -841,7 +765,6 @@ vfloat32mf2_t test_vamoswapei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vflo
   return vamoswapei16_v_f32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -852,7 +775,6 @@ vfloat32m1_t test_vamoswapei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat
   return vamoswapei16_v_f32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -863,7 +785,6 @@ vfloat32m2_t test_vamoswapei16_v_f32m2 (float *base, vuint16m1_t bindex, vfloat3
   return vamoswapei16_v_f32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -874,7 +795,6 @@ vfloat32m4_t test_vamoswapei16_v_f32m4 (float *base, vuint16m2_t bindex, vfloat3
   return vamoswapei16_v_f32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
@@ -885,7 +805,6 @@ vfloat32m8_t test_vamoswapei16_v_f32m8 (float *base, vuint16m4_t bindex, vfloat3
   return vamoswapei16_v_f32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -896,7 +815,6 @@ vfloat32mf2_t test_vamoswapei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vflo
   return vamoswapei32_v_f32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -907,7 +825,6 @@ vfloat32m1_t test_vamoswapei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat3
   return vamoswapei32_v_f32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -918,7 +835,6 @@ vfloat32m2_t test_vamoswapei32_v_f32m2 (float *base, vuint32m2_t bindex, vfloat3
   return vamoswapei32_v_f32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -929,7 +845,6 @@ vfloat32m4_t test_vamoswapei32_v_f32m4 (float *base, vuint32m4_t bindex, vfloat3
   return vamoswapei32_v_f32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
@@ -940,7 +855,6 @@ vfloat32m8_t test_vamoswapei32_v_f32m8 (float *base, vuint32m8_t bindex, vfloat3
   return vamoswapei32_v_f32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -951,7 +865,6 @@ vfloat32mf2_t test_vamoswapei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloa
   return vamoswapei64_v_f32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -962,7 +875,6 @@ vfloat32m1_t test_vamoswapei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat3
   return vamoswapei64_v_f32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -973,7 +885,6 @@ vfloat32m2_t test_vamoswapei64_v_f32m2 (float *base, vuint64m4_t bindex, vfloat3
   return vamoswapei64_v_f32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -984,7 +895,6 @@ vfloat32m4_t test_vamoswapei64_v_f32m4 (float *base, vuint64m8_t bindex, vfloat3
   return vamoswapei64_v_f32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -995,7 +905,6 @@ vfloat64m1_t test_vamoswapei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat6
   return vamoswapei8_v_f64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -1006,7 +915,6 @@ vfloat64m2_t test_vamoswapei8_v_f64m2 (double *base, vuint8mf4_t bindex, vfloat6
   return vamoswapei8_v_f64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -1017,7 +925,6 @@ vfloat64m4_t test_vamoswapei8_v_f64m4 (double *base, vuint8mf2_t bindex, vfloat6
   return vamoswapei8_v_f64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
@@ -1028,7 +935,6 @@ vfloat64m8_t test_vamoswapei8_v_f64m8 (double *base, vuint8m1_t bindex, vfloat64
   return vamoswapei8_v_f64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -1039,7 +945,6 @@ vfloat64m1_t test_vamoswapei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloa
   return vamoswapei16_v_f64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -1050,7 +955,6 @@ vfloat64m2_t test_vamoswapei16_v_f64m2 (double *base, vuint16mf2_t bindex, vfloa
   return vamoswapei16_v_f64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -1061,7 +965,6 @@ vfloat64m4_t test_vamoswapei16_v_f64m4 (double *base, vuint16m1_t bindex, vfloat
   return vamoswapei16_v_f64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
@@ -1072,7 +975,6 @@ vfloat64m8_t test_vamoswapei16_v_f64m8 (double *base, vuint16m2_t bindex, vfloat
   return vamoswapei16_v_f64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -1083,7 +985,6 @@ vfloat64m1_t test_vamoswapei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloa
   return vamoswapei32_v_f64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -1094,7 +995,6 @@ vfloat64m2_t test_vamoswapei32_v_f64m2 (double *base, vuint32m1_t bindex, vfloat
   return vamoswapei32_v_f64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -1105,7 +1005,6 @@ vfloat64m4_t test_vamoswapei32_v_f64m4 (double *base, vuint32m2_t bindex, vfloat
   return vamoswapei32_v_f64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
@@ -1116,7 +1015,6 @@ vfloat64m8_t test_vamoswapei32_v_f64m8 (double *base, vuint32m4_t bindex, vfloat
   return vamoswapei32_v_f64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -1127,7 +1025,6 @@ vfloat64m1_t test_vamoswapei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat
   return vamoswapei64_v_f64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -1138,7 +1035,6 @@ vfloat64m2_t test_vamoswapei64_v_f64m2 (double *base, vuint64m2_t bindex, vfloat
   return vamoswapei64_v_f64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -1149,7 +1045,6 @@ vfloat64m4_t test_vamoswapei64_v_f64m4 (double *base, vuint64m4_t bindex, vfloat
   return vamoswapei64_v_f64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
@@ -1160,7 +1055,6 @@ vfloat64m8_t test_vamoswapei64_v_f64m8 (double *base, vuint64m8_t bindex, vfloat
   return vamoswapei64_v_f64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1171,7 +1065,6 @@ vint32mf2_t test_vamoswapei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf
   return vamoswapei8_v_i32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1182,7 +1075,6 @@ vint32m1_t test_vamoswapei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_
   return vamoswapei8_v_i32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1193,7 +1085,6 @@ vint32m2_t test_vamoswapei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_
   return vamoswapei8_v_i32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1204,7 +1095,6 @@ vint32m4_t test_vamoswapei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t
   return vamoswapei8_v_i32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1215,7 +1105,6 @@ vint32m8_t test_vamoswapei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t
   return vamoswapei8_v_i32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1226,7 +1115,6 @@ vint32mf2_t test_vamoswapei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16
   return vamoswapei16_v_i32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1237,7 +1125,6 @@ vint32m1_t test_vamoswapei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf
   return vamoswapei16_v_i32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1248,7 +1135,6 @@ vint32m2_t test_vamoswapei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1
   return vamoswapei16_v_i32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1259,7 +1145,6 @@ vint32m4_t test_vamoswapei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_
   return vamoswapei16_v_i32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1270,7 +1155,6 @@ vint32m8_t test_vamoswapei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_
   return vamoswapei16_v_i32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1281,7 +1165,6 @@ vint32mf2_t test_vamoswapei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32
   return vamoswapei32_v_i32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1292,7 +1175,6 @@ vint32m1_t test_vamoswapei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1
   return vamoswapei32_v_i32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1303,7 +1185,6 @@ vint32m2_t test_vamoswapei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2
   return vamoswapei32_v_i32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1314,7 +1195,6 @@ vint32m4_t test_vamoswapei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_
   return vamoswapei32_v_i32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1325,7 +1205,6 @@ vint32m8_t test_vamoswapei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_
   return vamoswapei32_v_i32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1336,7 +1215,6 @@ vint32mf2_t test_vamoswapei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64
   return vamoswapei64_v_i32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1347,7 +1225,6 @@ vint32m1_t test_vamoswapei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2
   return vamoswapei64_v_i32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1358,7 +1235,6 @@ vint32m2_t test_vamoswapei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4
   return vamoswapei64_v_i32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1369,7 +1245,6 @@ vint32m4_t test_vamoswapei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_
   return vamoswapei64_v_i32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1380,7 +1255,6 @@ vint64m1_t test_vamoswapei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_
   return vamoswapei8_v_i64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1391,7 +1265,6 @@ vint64m2_t test_vamoswapei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_
   return vamoswapei8_v_i64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1402,7 +1275,6 @@ vint64m4_t test_vamoswapei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_
   return vamoswapei8_v_i64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1413,7 +1285,6 @@ vint64m8_t test_vamoswapei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t
   return vamoswapei8_v_i64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1424,7 +1295,6 @@ vint64m1_t test_vamoswapei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf
   return vamoswapei16_v_i64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1435,7 +1305,6 @@ vint64m2_t test_vamoswapei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf
   return vamoswapei16_v_i64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1446,7 +1315,6 @@ vint64m4_t test_vamoswapei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1
   return vamoswapei16_v_i64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1457,7 +1325,6 @@ vint64m8_t test_vamoswapei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_
   return vamoswapei16_v_i64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1468,7 +1335,6 @@ vint64m1_t test_vamoswapei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf
   return vamoswapei32_v_i64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1479,7 +1345,6 @@ vint64m2_t test_vamoswapei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1
   return vamoswapei32_v_i64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1490,7 +1355,6 @@ vint64m4_t test_vamoswapei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2
   return vamoswapei32_v_i64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1501,7 +1365,6 @@ vint64m8_t test_vamoswapei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_
   return vamoswapei32_v_i64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1512,7 +1375,6 @@ vint64m1_t test_vamoswapei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1
   return vamoswapei64_v_i64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1523,7 +1385,6 @@ vint64m2_t test_vamoswapei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2
   return vamoswapei64_v_i64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1534,7 +1395,6 @@ vint64m4_t test_vamoswapei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4
   return vamoswapei64_v_i64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1545,7 +1405,6 @@ vint64m8_t test_vamoswapei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_
   return vamoswapei64_v_i64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1556,7 +1415,6 @@ vuint32mf2_t test_vamoswapei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8
   return vamoswapei8_v_u32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1567,7 +1425,6 @@ vuint32m1_t test_vamoswapei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf
   return vamoswapei8_v_u32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1578,7 +1435,6 @@ vuint32m2_t test_vamoswapei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf
   return vamoswapei8_v_u32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1589,7 +1445,6 @@ vuint32m4_t test_vamoswapei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_
   return vamoswapei8_v_u32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1600,7 +1455,6 @@ vuint32m8_t test_vamoswapei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_
   return vamoswapei8_v_u32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1611,7 +1465,6 @@ vuint32mf2_t test_vamoswapei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint
   return vamoswapei16_v_u32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1622,7 +1475,6 @@ vuint32m1_t test_vamoswapei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16
   return vamoswapei16_v_u32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1633,7 +1485,6 @@ vuint32m2_t test_vamoswapei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16
   return vamoswapei16_v_u32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1644,7 +1495,6 @@ vuint32m4_t test_vamoswapei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m
   return vamoswapei16_v_u32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1655,7 +1505,6 @@ vuint32m8_t test_vamoswapei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m
   return vamoswapei16_v_u32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1666,7 +1515,6 @@ vuint32mf2_t test_vamoswapei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint
   return vamoswapei32_v_u32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1677,7 +1525,6 @@ vuint32m1_t test_vamoswapei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32
   return vamoswapei32_v_u32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1688,7 +1535,6 @@ vuint32m2_t test_vamoswapei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32
   return vamoswapei32_v_u32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1699,7 +1545,6 @@ vuint32m4_t test_vamoswapei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m
   return vamoswapei32_v_u32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1710,7 +1555,6 @@ vuint32m8_t test_vamoswapei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m
   return vamoswapei32_v_u32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1721,7 +1565,6 @@ vuint32mf2_t test_vamoswapei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint
   return vamoswapei64_v_u32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1732,7 +1575,6 @@ vuint32m1_t test_vamoswapei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64
   return vamoswapei64_v_u32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1743,7 +1585,6 @@ vuint32m2_t test_vamoswapei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64
   return vamoswapei64_v_u32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1754,7 +1595,6 @@ vuint32m4_t test_vamoswapei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m
   return vamoswapei64_v_u32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1765,7 +1605,6 @@ vuint64m1_t test_vamoswapei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf
   return vamoswapei8_v_u64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1776,7 +1615,6 @@ vuint64m2_t test_vamoswapei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf
   return vamoswapei8_v_u64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1787,7 +1625,6 @@ vuint64m4_t test_vamoswapei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf
   return vamoswapei8_v_u64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1798,7 +1635,6 @@ vuint64m8_t test_vamoswapei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_
   return vamoswapei8_v_u64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1809,7 +1645,6 @@ vuint64m1_t test_vamoswapei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16
   return vamoswapei16_v_u64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1820,7 +1655,6 @@ vuint64m2_t test_vamoswapei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16
   return vamoswapei16_v_u64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1831,7 +1665,6 @@ vuint64m4_t test_vamoswapei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16
   return vamoswapei16_v_u64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1842,7 +1675,6 @@ vuint64m8_t test_vamoswapei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m
   return vamoswapei16_v_u64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1853,7 +1685,6 @@ vuint64m1_t test_vamoswapei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32
   return vamoswapei32_v_u64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1864,7 +1695,6 @@ vuint64m2_t test_vamoswapei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32
   return vamoswapei32_v_u64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1875,7 +1705,6 @@ vuint64m4_t test_vamoswapei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32
   return vamoswapei32_v_u64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1886,7 +1715,6 @@ vuint64m8_t test_vamoswapei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m
   return vamoswapei32_v_u64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1897,7 +1725,6 @@ vuint64m1_t test_vamoswapei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64
   return vamoswapei64_v_u64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1908,7 +1735,6 @@ vuint64m2_t test_vamoswapei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64
   return vamoswapei64_v_u64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1919,7 +1745,6 @@ vuint64m4_t test_vamoswapei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64
   return vamoswapei64_v_u64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1930,7 +1755,6 @@ vuint64m8_t test_vamoswapei64_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint64m
   return vamoswapei64_v_u64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -1941,7 +1765,6 @@ vfloat32mf2_t test_vamoswapei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf
   return vamoswapei8_v_f32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -1952,7 +1775,6 @@ vfloat32m1_t test_vamoswapei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_
   return vamoswapei8_v_f32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -1963,7 +1785,6 @@ vfloat32m2_t test_vamoswapei8_v_f32m2_m (vbool16_t mask, float *base, vuint8mf2_
   return vamoswapei8_v_f32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -1974,7 +1795,6 @@ vfloat32m4_t test_vamoswapei8_v_f32m4_m (vbool8_t mask, float *base, vuint8m1_t
   return vamoswapei8_v_f32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
@@ -1985,7 +1805,6 @@ vfloat32m8_t test_vamoswapei8_v_f32m8_m (vbool4_t mask, float *base, vuint8m2_t
   return vamoswapei8_v_f32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -1996,7 +1815,6 @@ vfloat32mf2_t test_vamoswapei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16
   return vamoswapei16_v_f32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -2007,7 +1825,6 @@ vfloat32m1_t test_vamoswapei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf
   return vamoswapei16_v_f32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -2018,7 +1835,6 @@ vfloat32m2_t test_vamoswapei16_v_f32m2_m (vbool16_t mask, float *base, vuint16m1
   return vamoswapei16_v_f32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -2029,7 +1845,6 @@ vfloat32m4_t test_vamoswapei16_v_f32m4_m (vbool8_t mask, float *base, vuint16m2_
   return vamoswapei16_v_f32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
@@ -2040,7 +1855,6 @@ vfloat32m8_t test_vamoswapei16_v_f32m8_m (vbool4_t mask, float *base, vuint16m4_
   return vamoswapei16_v_f32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -2051,7 +1865,6 @@ vfloat32mf2_t test_vamoswapei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32
   return vamoswapei32_v_f32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -2062,7 +1875,6 @@ vfloat32m1_t test_vamoswapei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1
   return vamoswapei32_v_f32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -2073,7 +1885,6 @@ vfloat32m2_t test_vamoswapei32_v_f32m2_m (vbool16_t mask, float *base, vuint32m2
   return vamoswapei32_v_f32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -2084,7 +1895,6 @@ vfloat32m4_t test_vamoswapei32_v_f32m4_m (vbool8_t mask, float *base, vuint32m4_
   return vamoswapei32_v_f32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
@@ -2095,7 +1905,6 @@ vfloat32m8_t test_vamoswapei32_v_f32m8_m (vbool4_t mask, float *base, vuint32m8_
   return vamoswapei32_v_f32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -2106,7 +1915,6 @@ vfloat32mf2_t test_vamoswapei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64
   return vamoswapei64_v_f32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -2117,7 +1925,6 @@ vfloat32m1_t test_vamoswapei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2
   return vamoswapei64_v_f32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -2128,7 +1935,6 @@ vfloat32m2_t test_vamoswapei64_v_f32m2_m (vbool16_t mask, float *base, vuint64m4
   return vamoswapei64_v_f32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -2139,7 +1945,6 @@ vfloat32m4_t test_vamoswapei64_v_f32m4_m (vbool8_t mask, float *base, vuint64m8_
   return vamoswapei64_v_f32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -2150,7 +1955,6 @@ vfloat64m1_t test_vamoswapei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8
   return vamoswapei8_v_f64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -2161,7 +1965,6 @@ vfloat64m2_t test_vamoswapei8_v_f64m2_m (vbool32_t mask, double *base, vuint8mf4
   return vamoswapei8_v_f64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -2172,7 +1975,6 @@ vfloat64m4_t test_vamoswapei8_v_f64m4_m (vbool16_t mask, double *base, vuint8mf2
   return vamoswapei8_v_f64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
@@ -2183,7 +1985,6 @@ vfloat64m8_t test_vamoswapei8_v_f64m8_m (vbool8_t mask, double *base, vuint8m1_t
   return vamoswapei8_v_f64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -2194,7 +1995,6 @@ vfloat64m1_t test_vamoswapei16_v_f64m1_m (vbool64_t mask, double *base, vuint16m
   return vamoswapei16_v_f64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -2205,7 +2005,6 @@ vfloat64m2_t test_vamoswapei16_v_f64m2_m (vbool32_t mask, double *base, vuint16m
   return vamoswapei16_v_f64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -2216,7 +2015,6 @@ vfloat64m4_t test_vamoswapei16_v_f64m4_m (vbool16_t mask, double *base, vuint16m
   return vamoswapei16_v_f64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
@@ -2227,7 +2025,6 @@ vfloat64m8_t test_vamoswapei16_v_f64m8_m (vbool8_t mask, double *base, vuint16m2
   return vamoswapei16_v_f64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -2238,7 +2035,6 @@ vfloat64m1_t test_vamoswapei32_v_f64m1_m (vbool64_t mask, double *base, vuint32m
   return vamoswapei32_v_f64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -2249,7 +2045,6 @@ vfloat64m2_t test_vamoswapei32_v_f64m2_m (vbool32_t mask, double *base, vuint32m
   return vamoswapei32_v_f64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -2260,7 +2055,6 @@ vfloat64m4_t test_vamoswapei32_v_f64m4_m (vbool16_t mask, double *base, vuint32m
   return vamoswapei32_v_f64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
@@ -2271,7 +2065,6 @@ vfloat64m8_t test_vamoswapei32_v_f64m8_m (vbool8_t mask, double *base, vuint32m4
   return vamoswapei32_v_f64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -2282,7 +2075,6 @@ vfloat64m1_t test_vamoswapei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m
   return vamoswapei64_v_f64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -2293,7 +2085,6 @@ vfloat64m2_t test_vamoswapei64_v_f64m2_m (vbool32_t mask, double *base, vuint64m
   return vamoswapei64_v_f64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -2304,7 +2095,6 @@ vfloat64m4_t test_vamoswapei64_v_f64m4_m (vbool16_t mask, double *base, vuint64m
   return vamoswapei64_v_f64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vamoxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vamoxor.c
index 47dee84b5805f..cc5d15e7c9dc0 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vamoxor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vamoxor.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -15,7 +14,6 @@ vint32mf2_t test_vamoxorei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32m
   return vamoxorei8_v_i32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -26,7 +24,6 @@ vint32m1_t test_vamoxorei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_
   return vamoxorei8_v_i32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -37,7 +34,6 @@ vint32m2_t test_vamoxorei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_
   return vamoxorei8_v_i32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -48,7 +44,6 @@ vint32m4_t test_vamoxorei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t
   return vamoxorei8_v_i32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -59,7 +54,6 @@ vint32m8_t test_vamoxorei8_v_i32m8 (int32_t *base, vuint8m2_t bindex, vint32m8_t
   return vamoxorei8_v_i32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -70,7 +64,6 @@ vint32mf2_t test_vamoxorei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint3
   return vamoxorei16_v_i32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -81,7 +74,6 @@ vint32m1_t test_vamoxorei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m
   return vamoxorei16_v_i32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -92,7 +84,6 @@ vint32m2_t test_vamoxorei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2
   return vamoxorei16_v_i32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -103,7 +94,6 @@ vint32m4_t test_vamoxorei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4
   return vamoxorei16_v_i32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -114,7 +104,6 @@ vint32m8_t test_vamoxorei16_v_i32m8 (int32_t *base, vuint16m4_t bindex, vint32m8
   return vamoxorei16_v_i32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -125,7 +114,6 @@ vint32mf2_t test_vamoxorei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint3
   return vamoxorei32_v_i32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -136,7 +124,6 @@ vint32m1_t test_vamoxorei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1
   return vamoxorei32_v_i32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -147,7 +134,6 @@ vint32m2_t test_vamoxorei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2
   return vamoxorei32_v_i32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -158,7 +144,6 @@ vint32m4_t test_vamoxorei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4
   return vamoxorei32_v_i32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -169,7 +154,6 @@ vint32m8_t test_vamoxorei32_v_i32m8 (int32_t *base, vuint32m8_t bindex, vint32m8
   return vamoxorei32_v_i32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -180,7 +164,6 @@ vint32mf2_t test_vamoxorei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32
   return vamoxorei64_v_i32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -191,7 +174,6 @@ vint32m1_t test_vamoxorei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1
   return vamoxorei64_v_i32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -202,7 +184,6 @@ vint32m2_t test_vamoxorei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2
   return vamoxorei64_v_i32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -213,7 +194,6 @@ vint32m4_t test_vamoxorei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4
   return vamoxorei64_v_i32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -224,7 +204,6 @@ vint64m1_t test_vamoxorei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_
   return vamoxorei8_v_i64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -235,7 +214,6 @@ vint64m2_t test_vamoxorei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_
   return vamoxorei8_v_i64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -246,7 +224,6 @@ vint64m4_t test_vamoxorei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_
   return vamoxorei8_v_i64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -257,7 +234,6 @@ vint64m8_t test_vamoxorei8_v_i64m8 (int64_t *base, vuint8m1_t bindex, vint64m8_t
   return vamoxorei8_v_i64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -268,7 +244,6 @@ vint64m1_t test_vamoxorei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m
   return vamoxorei16_v_i64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -279,7 +254,6 @@ vint64m2_t test_vamoxorei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m
   return vamoxorei16_v_i64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -290,7 +264,6 @@ vint64m4_t test_vamoxorei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4
   return vamoxorei16_v_i64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -301,7 +274,6 @@ vint64m8_t test_vamoxorei16_v_i64m8 (int64_t *base, vuint16m2_t bindex, vint64m8
   return vamoxorei16_v_i64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -312,7 +284,6 @@ vint64m1_t test_vamoxorei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m
   return vamoxorei32_v_i64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -323,7 +294,6 @@ vint64m2_t test_vamoxorei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2
   return vamoxorei32_v_i64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -334,7 +304,6 @@ vint64m4_t test_vamoxorei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4
   return vamoxorei32_v_i64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -345,7 +314,6 @@ vint64m8_t test_vamoxorei32_v_i64m8 (int64_t *base, vuint32m4_t bindex, vint64m8
   return vamoxorei32_v_i64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -356,7 +324,6 @@ vint64m1_t test_vamoxorei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1
   return vamoxorei64_v_i64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -367,7 +334,6 @@ vint64m2_t test_vamoxorei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2
   return vamoxorei64_v_i64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -378,7 +344,6 @@ vint64m4_t test_vamoxorei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4
   return vamoxorei64_v_i64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -389,7 +354,6 @@ vint64m8_t test_vamoxorei64_v_i64m8 (int64_t *base, vuint64m8_t bindex, vint64m8
   return vamoxorei64_v_i64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -400,7 +364,6 @@ vuint32mf2_t test_vamoxorei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint
   return vamoxorei8_v_u32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -411,7 +374,6 @@ vuint32m1_t test_vamoxorei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32
   return vamoxorei8_v_u32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -422,7 +384,6 @@ vuint32m2_t test_vamoxorei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32
   return vamoxorei8_v_u32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -433,7 +394,6 @@ vuint32m4_t test_vamoxorei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32m
   return vamoxorei8_v_u32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -444,7 +404,6 @@ vuint32m8_t test_vamoxorei8_v_u32m8 (uint32_t *base, vuint8m2_t bindex, vuint32m
   return vamoxorei8_v_u32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -455,7 +414,6 @@ vuint32mf2_t test_vamoxorei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vui
   return vamoxorei16_v_u32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -466,7 +424,6 @@ vuint32m1_t test_vamoxorei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint
   return vamoxorei16_v_u32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -477,7 +434,6 @@ vuint32m2_t test_vamoxorei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint3
   return vamoxorei16_v_u32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -488,7 +444,6 @@ vuint32m4_t test_vamoxorei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint3
   return vamoxorei16_v_u32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -499,7 +454,6 @@ vuint32m8_t test_vamoxorei16_v_u32m8 (uint32_t *base, vuint16m4_t bindex, vuint3
   return vamoxorei16_v_u32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -510,7 +464,6 @@ vuint32mf2_t test_vamoxorei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vui
   return vamoxorei32_v_u32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -521,7 +474,6 @@ vuint32m1_t test_vamoxorei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint3
   return vamoxorei32_v_u32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -532,7 +484,6 @@ vuint32m2_t test_vamoxorei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint3
   return vamoxorei32_v_u32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -543,7 +494,6 @@ vuint32m4_t test_vamoxorei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint3
   return vamoxorei32_v_u32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -554,7 +504,6 @@ vuint32m8_t test_vamoxorei32_v_u32m8 (uint32_t *base, vuint32m8_t bindex, vuint3
   return vamoxorei32_v_u32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -565,7 +514,6 @@ vuint32mf2_t test_vamoxorei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuin
   return vamoxorei64_v_u32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -576,7 +524,6 @@ vuint32m1_t test_vamoxorei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint3
   return vamoxorei64_v_u32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -587,7 +534,6 @@ vuint32m2_t test_vamoxorei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint3
   return vamoxorei64_v_u32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -598,7 +544,6 @@ vuint32m4_t test_vamoxorei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint3
   return vamoxorei64_v_u32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -609,7 +554,6 @@ vuint64m1_t test_vamoxorei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64
   return vamoxorei8_v_u64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -620,7 +564,6 @@ vuint64m2_t test_vamoxorei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64
   return vamoxorei8_v_u64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -631,7 +574,6 @@ vuint64m4_t test_vamoxorei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint64
   return vamoxorei8_v_u64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -642,7 +584,6 @@ vuint64m8_t test_vamoxorei8_v_u64m8 (uint64_t *base, vuint8m1_t bindex, vuint64m
   return vamoxorei8_v_u64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -653,7 +594,6 @@ vuint64m1_t test_vamoxorei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint
   return vamoxorei16_v_u64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -664,7 +604,6 @@ vuint64m2_t test_vamoxorei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint
   return vamoxorei16_v_u64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -675,7 +614,6 @@ vuint64m4_t test_vamoxorei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint6
   return vamoxorei16_v_u64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -686,7 +624,6 @@ vuint64m8_t test_vamoxorei16_v_u64m8 (uint64_t *base, vuint16m2_t bindex, vuint6
   return vamoxorei16_v_u64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -697,7 +634,6 @@ vuint64m1_t test_vamoxorei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint
   return vamoxorei32_v_u64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -708,7 +644,6 @@ vuint64m2_t test_vamoxorei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint6
   return vamoxorei32_v_u64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -719,7 +654,6 @@ vuint64m4_t test_vamoxorei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint6
   return vamoxorei32_v_u64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -730,7 +664,6 @@ vuint64m8_t test_vamoxorei32_v_u64m8 (uint64_t *base, vuint32m4_t bindex, vuint6
   return vamoxorei32_v_u64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -741,7 +674,6 @@ vuint64m1_t test_vamoxorei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint6
   return vamoxorei64_v_u64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -752,7 +684,6 @@ vuint64m2_t test_vamoxorei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint6
   return vamoxorei64_v_u64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -763,7 +694,6 @@ vuint64m4_t test_vamoxorei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint6
   return vamoxorei64_v_u64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -774,7 +704,6 @@ vuint64m8_t test_vamoxorei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint6
   return vamoxorei64_v_u64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -785,7 +714,6 @@ vint32mf2_t test_vamoxorei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8
   return vamoxorei8_v_i32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -796,7 +724,6 @@ vint32m1_t test_vamoxorei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t
   return vamoxorei8_v_i32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -807,7 +734,6 @@ vint32m2_t test_vamoxorei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t
   return vamoxorei8_v_i32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -818,7 +744,6 @@ vint32m4_t test_vamoxorei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t b
   return vamoxorei8_v_i32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -829,7 +754,6 @@ vint32m8_t test_vamoxorei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t b
   return vamoxorei8_v_i32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -840,7 +764,6 @@ vint32mf2_t test_vamoxorei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16m
   return vamoxorei16_v_i32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -851,7 +774,6 @@ vint32m1_t test_vamoxorei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2
   return vamoxorei16_v_i32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -862,7 +784,6 @@ vint32m2_t test_vamoxorei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_
   return vamoxorei16_v_i32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -873,7 +794,6 @@ vint32m4_t test_vamoxorei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t
   return vamoxorei16_v_i32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -884,7 +804,6 @@ vint32m8_t test_vamoxorei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t
   return vamoxorei16_v_i32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -895,7 +814,6 @@ vint32mf2_t test_vamoxorei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32m
   return vamoxorei32_v_i32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -906,7 +824,6 @@ vint32m1_t test_vamoxorei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_
   return vamoxorei32_v_i32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -917,7 +834,6 @@ vint32m2_t test_vamoxorei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_
   return vamoxorei32_v_i32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -928,7 +844,6 @@ vint32m4_t test_vamoxorei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t
   return vamoxorei32_v_i32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -939,7 +854,6 @@ vint32m8_t test_vamoxorei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t
   return vamoxorei32_v_i32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -950,7 +864,6 @@ vint32mf2_t test_vamoxorei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m
   return vamoxorei64_v_i32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -961,7 +874,6 @@ vint32m1_t test_vamoxorei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_
   return vamoxorei64_v_i32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -972,7 +884,6 @@ vint32m2_t test_vamoxorei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_
   return vamoxorei64_v_i32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -983,7 +894,6 @@ vint32m4_t test_vamoxorei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t
   return vamoxorei64_v_i32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -994,7 +904,6 @@ vint64m1_t test_vamoxorei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t
   return vamoxorei8_v_i64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1005,7 +914,6 @@ vint64m2_t test_vamoxorei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t
   return vamoxorei8_v_i64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1016,7 +924,6 @@ vint64m4_t test_vamoxorei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t
   return vamoxorei8_v_i64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1027,7 +934,6 @@ vint64m8_t test_vamoxorei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t b
   return vamoxorei8_v_i64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1038,7 +944,6 @@ vint64m1_t test_vamoxorei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4
   return vamoxorei16_v_i64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1049,7 +954,6 @@ vint64m2_t test_vamoxorei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2
   return vamoxorei16_v_i64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1060,7 +964,6 @@ vint64m4_t test_vamoxorei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_
   return vamoxorei16_v_i64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1071,7 +974,6 @@ vint64m8_t test_vamoxorei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t
   return vamoxorei16_v_i64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1082,7 +984,6 @@ vint64m1_t test_vamoxorei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2
   return vamoxorei32_v_i64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1093,7 +994,6 @@ vint64m2_t test_vamoxorei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_
   return vamoxorei32_v_i64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1104,7 +1004,6 @@ vint64m4_t test_vamoxorei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_
   return vamoxorei32_v_i64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1115,7 +1014,6 @@ vint64m8_t test_vamoxorei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t
   return vamoxorei32_v_i64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1126,7 +1024,6 @@ vint64m1_t test_vamoxorei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_
   return vamoxorei64_v_i64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1137,7 +1034,6 @@ vint64m2_t test_vamoxorei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_
   return vamoxorei64_v_i64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1148,7 +1044,6 @@ vint64m4_t test_vamoxorei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_
   return vamoxorei64_v_i64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1159,7 +1054,6 @@ vint64m8_t test_vamoxorei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t
   return vamoxorei64_v_i64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1170,7 +1064,6 @@ vuint32mf2_t test_vamoxorei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8m
   return vamoxorei8_v_u32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1181,7 +1074,6 @@ vuint32m1_t test_vamoxorei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4
   return vamoxorei8_v_u32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1192,7 +1084,6 @@ vuint32m2_t test_vamoxorei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2
   return vamoxorei8_v_u32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1203,7 +1094,6 @@ vuint32m4_t test_vamoxorei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t
   return vamoxorei8_v_u32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1214,7 +1104,6 @@ vuint32m8_t test_vamoxorei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t
   return vamoxorei8_v_u32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1225,7 +1114,6 @@ vuint32mf2_t test_vamoxorei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint1
   return vamoxorei16_v_u32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1236,7 +1124,6 @@ vuint32m1_t test_vamoxorei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16m
   return vamoxorei16_v_u32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1247,7 +1134,6 @@ vuint32m2_t test_vamoxorei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m
   return vamoxorei16_v_u32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1258,7 +1144,6 @@ vuint32m4_t test_vamoxorei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2
   return vamoxorei16_v_u32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1269,7 +1154,6 @@ vuint32m8_t test_vamoxorei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4
   return vamoxorei16_v_u32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1280,7 +1164,6 @@ vuint32mf2_t test_vamoxorei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint3
   return vamoxorei32_v_u32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1291,7 +1174,6 @@ vuint32m1_t test_vamoxorei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m
   return vamoxorei32_v_u32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1302,7 +1184,6 @@ vuint32m2_t test_vamoxorei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m
   return vamoxorei32_v_u32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1313,7 +1194,6 @@ vuint32m4_t test_vamoxorei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4
   return vamoxorei32_v_u32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1324,7 +1204,6 @@ vuint32m8_t test_vamoxorei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8
   return vamoxorei32_v_u32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1335,7 +1214,6 @@ vuint32mf2_t test_vamoxorei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint6
   return vamoxorei64_v_u32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1346,7 +1224,6 @@ vuint32m1_t test_vamoxorei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m
   return vamoxorei64_v_u32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1357,7 +1234,6 @@ vuint32m2_t test_vamoxorei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m
   return vamoxorei64_v_u32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1368,7 +1244,6 @@ vuint32m4_t test_vamoxorei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8
   return vamoxorei64_v_u32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1379,7 +1254,6 @@ vuint64m1_t test_vamoxorei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8
   return vamoxorei8_v_u64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1390,7 +1264,6 @@ vuint64m2_t test_vamoxorei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4
   return vamoxorei8_v_u64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1401,7 +1274,6 @@ vuint64m4_t test_vamoxorei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2
   return vamoxorei8_v_u64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1412,7 +1284,6 @@ vuint64m8_t test_vamoxorei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t
   return vamoxorei8_v_u64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1423,7 +1294,6 @@ vuint64m1_t test_vamoxorei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16m
   return vamoxorei16_v_u64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1434,7 +1304,6 @@ vuint64m2_t test_vamoxorei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16m
   return vamoxorei16_v_u64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1445,7 +1314,6 @@ vuint64m4_t test_vamoxorei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m
   return vamoxorei16_v_u64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1456,7 +1324,6 @@ vuint64m8_t test_vamoxorei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2
   return vamoxorei16_v_u64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1467,7 +1334,6 @@ vuint64m1_t test_vamoxorei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32m
   return vamoxorei32_v_u64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1478,7 +1344,6 @@ vuint64m2_t test_vamoxorei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m
   return vamoxorei32_v_u64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1489,7 +1354,6 @@ vuint64m4_t test_vamoxorei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m
   return vamoxorei32_v_u64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1500,7 +1364,6 @@ vuint64m8_t test_vamoxorei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4
   return vamoxorei32_v_u64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1511,7 +1374,6 @@ vuint64m1_t test_vamoxorei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m
   return vamoxorei64_v_u64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1522,7 +1384,6 @@ vuint64m2_t test_vamoxorei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m
   return vamoxorei64_v_u64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1533,7 +1394,6 @@ vuint64m4_t test_vamoxorei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m
   return vamoxorei64_v_u64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vand.c
index c8f6077dbca34..2e05ccb812645 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vand.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vand.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint8mf8_t test_vand_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
   return vand_vv_i8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint8mf8_t test_vand_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
   return vand_vx_i8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint8mf4_t test_vand_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
   return vand_vv_i8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint8mf4_t test_vand_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
   return vand_vx_i8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint8mf2_t test_vand_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
   return vand_vv_i8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint8mf2_t test_vand_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
   return vand_vx_i8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vint8m1_t test_vand_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
   return vand_vv_i8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vint8m1_t test_vand_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
   return vand_vx_i8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vint8m2_t test_vand_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
   return vand_vv_i8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vint8m2_t test_vand_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
   return vand_vx_i8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vint8m4_t test_vand_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
   return vand_vv_i8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vint8m4_t test_vand_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
   return vand_vx_i8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vint8m8_t test_vand_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
   return vand_vv_i8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vint8m8_t test_vand_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
   return vand_vx_i8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vint16mf4_t test_vand_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
   return vand_vv_i16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vint16mf4_t test_vand_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
   return vand_vx_i16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vint16mf2_t test_vand_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
   return vand_vv_i16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vint16mf2_t test_vand_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
   return vand_vx_i16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vint16m1_t test_vand_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
   return vand_vv_i16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vint16m1_t test_vand_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
   return vand_vx_i16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vint16m2_t test_vand_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
   return vand_vv_i16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vint16m2_t test_vand_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
   return vand_vx_i16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vint16m4_t test_vand_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
   return vand_vv_i16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vint16m4_t test_vand_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
   return vand_vx_i16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vint16m8_t test_vand_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
   return vand_vv_i16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vint16m8_t test_vand_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
   return vand_vx_i16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vint32mf2_t test_vand_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
   return vand_vv_i32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vint32mf2_t test_vand_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
   return vand_vx_i32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vint32m1_t test_vand_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
   return vand_vv_i32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -304,7 +274,6 @@ vint32m1_t test_vand_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
   return vand_vx_i32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -314,7 +283,6 @@ vint32m2_t test_vand_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
   return vand_vv_i32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -324,7 +292,6 @@ vint32m2_t test_vand_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
   return vand_vx_i32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -334,7 +301,6 @@ vint32m4_t test_vand_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
   return vand_vv_i32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -344,7 +310,6 @@ vint32m4_t test_vand_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
   return vand_vx_i32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -354,7 +319,6 @@ vint32m8_t test_vand_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
   return vand_vv_i32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -364,7 +328,6 @@ vint32m8_t test_vand_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
   return vand_vx_i32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -374,7 +337,6 @@ vint64m1_t test_vand_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
   return vand_vv_i64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -384,7 +346,6 @@ vint64m1_t test_vand_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
   return vand_vx_i64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -394,7 +355,6 @@ vint64m2_t test_vand_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
   return vand_vv_i64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -404,7 +364,6 @@ vint64m2_t test_vand_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
   return vand_vx_i64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -414,7 +373,6 @@ vint64m4_t test_vand_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
   return vand_vv_i64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -424,7 +382,6 @@ vint64m4_t test_vand_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
   return vand_vx_i64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -434,7 +391,6 @@ vint64m8_t test_vand_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
   return vand_vv_i64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -444,7 +400,6 @@ vint64m8_t test_vand_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
   return vand_vx_i64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -454,7 +409,6 @@ vuint8mf8_t test_vand_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
   return vand_vv_u8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -464,7 +418,6 @@ vuint8mf8_t test_vand_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
   return vand_vx_u8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -474,7 +427,6 @@ vuint8mf4_t test_vand_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
   return vand_vv_u8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -484,7 +436,6 @@ vuint8mf4_t test_vand_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
   return vand_vx_u8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -494,7 +445,6 @@ vuint8mf2_t test_vand_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
   return vand_vv_u8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -504,7 +454,6 @@ vuint8mf2_t test_vand_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
   return vand_vx_u8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -514,7 +463,6 @@ vuint8m1_t test_vand_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vand_vv_u8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -524,7 +472,6 @@ vuint8m1_t test_vand_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
   return vand_vx_u8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -534,7 +481,6 @@ vuint8m2_t test_vand_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vand_vv_u8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -544,7 +490,6 @@ vuint8m2_t test_vand_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
   return vand_vx_u8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -554,7 +499,6 @@ vuint8m4_t test_vand_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vand_vv_u8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -564,7 +508,6 @@ vuint8m4_t test_vand_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
   return vand_vx_u8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -574,7 +517,6 @@ vuint8m8_t test_vand_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
   return vand_vv_u8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -584,7 +526,6 @@ vuint8m8_t test_vand_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
   return vand_vx_u8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -594,7 +535,6 @@ vuint16mf4_t test_vand_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
   return vand_vv_u16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -604,7 +544,6 @@ vuint16mf4_t test_vand_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
   return vand_vx_u16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -614,7 +553,6 @@ vuint16mf2_t test_vand_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
   return vand_vv_u16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -624,7 +562,6 @@ vuint16mf2_t test_vand_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
   return vand_vx_u16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -634,7 +571,6 @@ vuint16m1_t test_vand_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
   return vand_vv_u16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -644,7 +580,6 @@ vuint16m1_t test_vand_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
   return vand_vx_u16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -654,7 +589,6 @@ vuint16m2_t test_vand_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vand_vv_u16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -664,7 +598,6 @@ vuint16m2_t test_vand_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
   return vand_vx_u16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -674,7 +607,6 @@ vuint16m4_t test_vand_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vand_vv_u16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -684,7 +616,6 @@ vuint16m4_t test_vand_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
   return vand_vx_u16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -694,7 +625,6 @@ vuint16m8_t test_vand_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return vand_vv_u16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -704,7 +634,6 @@ vuint16m8_t test_vand_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
   return vand_vx_u16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -714,7 +643,6 @@ vuint32mf2_t test_vand_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
   return vand_vv_u32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -724,7 +652,6 @@ vuint32mf2_t test_vand_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
   return vand_vx_u32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -734,7 +661,6 @@ vuint32m1_t test_vand_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
   return vand_vv_u32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -744,7 +670,6 @@ vuint32m1_t test_vand_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
   return vand_vx_u32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -754,7 +679,6 @@ vuint32m2_t test_vand_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
   return vand_vv_u32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -764,7 +688,6 @@ vuint32m2_t test_vand_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vand_vx_u32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -774,7 +697,6 @@ vuint32m4_t test_vand_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vand_vv_u32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -784,7 +706,6 @@ vuint32m4_t test_vand_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vand_vx_u32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -794,7 +715,6 @@ vuint32m8_t test_vand_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
   return vand_vv_u32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -804,7 +724,6 @@ vuint32m8_t test_vand_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
   return vand_vx_u32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -814,7 +733,6 @@ vuint64m1_t test_vand_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
   return vand_vv_u64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -824,7 +742,6 @@ vuint64m1_t test_vand_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
   return vand_vx_u64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -834,7 +751,6 @@ vuint64m2_t test_vand_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
   return vand_vv_u64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -844,7 +760,6 @@ vuint64m2_t test_vand_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
   return vand_vx_u64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -854,7 +769,6 @@ vuint64m4_t test_vand_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
   return vand_vv_u64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -864,7 +778,6 @@ vuint64m4_t test_vand_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
   return vand_vx_u64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -874,7 +787,6 @@ vuint64m8_t test_vand_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
   return vand_vv_u64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -884,7 +796,6 @@ vuint64m8_t test_vand_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
   return vand_vx_u64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -894,7 +805,6 @@ vint8mf8_t test_vand_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t
   return vand_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -904,7 +814,6 @@ vint8mf8_t test_vand_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t
   return vand_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -914,7 +823,6 @@ vint8mf4_t test_vand_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t
   return vand_vv_i8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -924,7 +832,6 @@ vint8mf4_t test_vand_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t
   return vand_vx_i8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -934,7 +841,6 @@ vint8mf2_t test_vand_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t
   return vand_vv_i8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -944,7 +850,6 @@ vint8mf2_t test_vand_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t
   return vand_vx_i8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -954,7 +859,6 @@ vint8m1_t test_vand_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1,
   return vand_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -964,7 +868,6 @@ vint8m1_t test_vand_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1,
   return vand_vx_i8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -974,7 +877,6 @@ vint8m2_t test_vand_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1,
   return vand_vv_i8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -984,7 +886,6 @@ vint8m2_t test_vand_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1,
   return vand_vx_i8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -994,7 +895,6 @@ vint8m4_t test_vand_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1,
   return vand_vv_i8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1004,7 +904,6 @@ vint8m4_t test_vand_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1,
   return vand_vx_i8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1014,7 +913,6 @@ vint8m8_t test_vand_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1,
   return vand_vv_i8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1024,7 +922,6 @@ vint8m8_t test_vand_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1,
   return vand_vx_i8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1034,7 +931,6 @@ vint16mf4_t test_vand_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16m
   return vand_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1044,7 +940,6 @@ vint16mf4_t test_vand_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16m
   return vand_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1054,7 +949,6 @@ vint16mf2_t test_vand_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16m
   return vand_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1064,7 +958,6 @@ vint16mf2_t test_vand_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16m
   return vand_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1074,7 +967,6 @@ vint16m1_t test_vand_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t
   return vand_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1084,7 +976,6 @@ vint16m1_t test_vand_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t
   return vand_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1094,7 +985,6 @@ vint16m2_t test_vand_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
   return vand_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1104,7 +994,6 @@ vint16m2_t test_vand_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
   return vand_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1114,7 +1003,6 @@ vint16m4_t test_vand_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
   return vand_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1124,7 +1012,6 @@ vint16m4_t test_vand_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
   return vand_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1134,7 +1021,6 @@ vint16m8_t test_vand_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
   return vand_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1144,7 +1030,6 @@ vint16m8_t test_vand_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
   return vand_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1154,7 +1039,6 @@ vint32mf2_t test_vand_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32m
   return vand_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1164,7 +1048,6 @@ vint32mf2_t test_vand_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32m
   return vand_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1174,7 +1057,6 @@ vint32m1_t test_vand_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t
   return vand_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1184,7 +1066,6 @@ vint32m1_t test_vand_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t
   return vand_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1194,7 +1075,6 @@ vint32m2_t test_vand_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t
   return vand_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1204,7 +1084,6 @@ vint32m2_t test_vand_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t
   return vand_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1214,7 +1093,6 @@ vint32m4_t test_vand_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
   return vand_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1224,7 +1102,6 @@ vint32m4_t test_vand_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
   return vand_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1234,7 +1111,6 @@ vint32m8_t test_vand_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
   return vand_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1244,7 +1120,6 @@ vint32m8_t test_vand_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
   return vand_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1254,7 +1129,6 @@ vint64m1_t test_vand_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t
   return vand_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1264,7 +1138,6 @@ vint64m1_t test_vand_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t
   return vand_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1274,7 +1147,6 @@ vint64m2_t test_vand_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t
   return vand_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1284,7 +1156,6 @@ vint64m2_t test_vand_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t
   return vand_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1294,7 +1165,6 @@ vint64m4_t test_vand_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t
   return vand_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1304,7 +1174,6 @@ vint64m4_t test_vand_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t
   return vand_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1314,7 +1183,6 @@ vint64m8_t test_vand_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
   return vand_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1324,7 +1192,6 @@ vint64m8_t test_vand_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
   return vand_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1334,7 +1201,6 @@ vuint8mf8_t test_vand_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf
   return vand_vv_u8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1344,7 +1210,6 @@ vuint8mf8_t test_vand_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf
   return vand_vx_u8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1354,7 +1219,6 @@ vuint8mf4_t test_vand_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf
   return vand_vv_u8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1364,7 +1228,6 @@ vuint8mf4_t test_vand_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf
   return vand_vx_u8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1374,7 +1237,6 @@ vuint8mf2_t test_vand_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf
   return vand_vv_u8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1384,7 +1246,6 @@ vuint8mf2_t test_vand_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf
   return vand_vx_u8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1394,7 +1255,6 @@ vuint8m1_t test_vand_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t o
   return vand_vv_u8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1404,7 +1264,6 @@ vuint8m1_t test_vand_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t o
   return vand_vx_u8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1414,7 +1273,6 @@ vuint8m2_t test_vand_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t o
   return vand_vv_u8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1424,7 +1282,6 @@ vuint8m2_t test_vand_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t o
   return vand_vx_u8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1434,7 +1291,6 @@ vuint8m4_t test_vand_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t o
   return vand_vv_u8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1444,7 +1300,6 @@ vuint8m4_t test_vand_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t o
   return vand_vx_u8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1454,7 +1309,6 @@ vuint8m8_t test_vand_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t o
   return vand_vv_u8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1464,7 +1318,6 @@ vuint8m8_t test_vand_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t o
   return vand_vx_u8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1474,7 +1327,6 @@ vuint16mf4_t test_vand_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint
   return vand_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1484,7 +1336,6 @@ vuint16mf4_t test_vand_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint
   return vand_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1494,7 +1345,6 @@ vuint16mf2_t test_vand_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint
   return vand_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1504,7 +1354,6 @@ vuint16mf2_t test_vand_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint
   return vand_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1514,7 +1363,6 @@ vuint16m1_t test_vand_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m
   return vand_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1524,7 +1372,6 @@ vuint16m1_t test_vand_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m
   return vand_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1534,7 +1381,6 @@ vuint16m2_t test_vand_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2
   return vand_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1544,7 +1390,6 @@ vuint16m2_t test_vand_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2
   return vand_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1554,7 +1399,6 @@ vuint16m4_t test_vand_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4
   return vand_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1564,7 +1408,6 @@ vuint16m4_t test_vand_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4
   return vand_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1574,7 +1417,6 @@ vuint16m8_t test_vand_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8
   return vand_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1584,7 +1426,6 @@ vuint16m8_t test_vand_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8
   return vand_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1594,7 +1435,6 @@ vuint32mf2_t test_vand_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint
   return vand_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1604,7 +1444,6 @@ vuint32mf2_t test_vand_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint
   return vand_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1614,7 +1453,6 @@ vuint32m1_t test_vand_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m
   return vand_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1624,7 +1462,6 @@ vuint32m1_t test_vand_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m
   return vand_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1634,7 +1471,6 @@ vuint32m2_t test_vand_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m
   return vand_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1644,7 +1480,6 @@ vuint32m2_t test_vand_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m
   return vand_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1654,7 +1489,6 @@ vuint32m4_t test_vand_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4
   return vand_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1664,7 +1498,6 @@ vuint32m4_t test_vand_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4
   return vand_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1674,7 +1507,6 @@ vuint32m8_t test_vand_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8
   return vand_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1684,7 +1516,6 @@ vuint32m8_t test_vand_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8
   return vand_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1694,7 +1525,6 @@ vuint64m1_t test_vand_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m
   return vand_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1704,7 +1534,6 @@ vuint64m1_t test_vand_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m
   return vand_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1714,7 +1543,6 @@ vuint64m2_t test_vand_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m
   return vand_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1724,7 +1552,6 @@ vuint64m2_t test_vand_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m
   return vand_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1734,7 +1561,6 @@ vuint64m4_t test_vand_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m
   return vand_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1744,7 +1570,6 @@ vuint64m4_t test_vand_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m
   return vand_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1754,7 +1579,6 @@ vuint64m8_t test_vand_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8
   return vand_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vasub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vasub.c
index 3fcd1e13bb01a..9ce439b5f38da 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vasub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vasub.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint8mf8_t test_vasub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
   return vasub_vv_i8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint8mf8_t test_vasub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
   return vasub_vx_i8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint8mf4_t test_vasub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
   return vasub_vv_i8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint8mf4_t test_vasub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
   return vasub_vx_i8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint8mf2_t test_vasub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
   return vasub_vv_i8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint8mf2_t test_vasub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
   return vasub_vx_i8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vint8m1_t test_vasub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
   return vasub_vv_i8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vint8m1_t test_vasub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
   return vasub_vx_i8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vint8m2_t test_vasub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
   return vasub_vv_i8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasub.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vint8m2_t test_vasub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
   return vasub_vx_i8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vint8m4_t test_vasub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
   return vasub_vv_i8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasub.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vint8m4_t test_vasub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
   return vasub_vx_i8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vint8m8_t test_vasub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
   return vasub_vv_i8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vint8m8_t test_vasub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
   return vasub_vx_i8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vint16mf4_t test_vasub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
   return vasub_vv_i16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vint16mf4_t test_vasub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
   return vasub_vx_i16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vint16mf2_t test_vasub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
   return vasub_vv_i16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasub.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vint16mf2_t test_vasub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
   return vasub_vx_i16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vint16m1_t test_vasub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
   return vasub_vv_i16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasub.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vint16m1_t test_vasub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
   return vasub_vx_i16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vint16m2_t test_vasub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
   return vasub_vv_i16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasub.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vint16m2_t test_vasub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
   return vasub_vx_i16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vint16m4_t test_vasub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
   return vasub_vv_i16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasub.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vint16m4_t test_vasub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
   return vasub_vx_i16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vint16m8_t test_vasub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
   return vasub_vv_i16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasub.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vint16m8_t test_vasub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
   return vasub_vx_i16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vint32mf2_t test_vasub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
   return vasub_vv_i32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vint32mf2_t test_vasub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
   return vasub_vx_i32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vint32m1_t test_vasub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
   return vasub_vv_i32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasub.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -304,7 +274,6 @@ vint32m1_t test_vasub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
   return vasub_vx_i32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -314,7 +283,6 @@ vint32m2_t test_vasub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
   return vasub_vv_i32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -324,7 +292,6 @@ vint32m2_t test_vasub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
   return vasub_vx_i32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -334,7 +301,6 @@ vint32m4_t test_vasub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
   return vasub_vv_i32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasub.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -344,7 +310,6 @@ vint32m4_t test_vasub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
   return vasub_vx_i32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -354,7 +319,6 @@ vint32m8_t test_vasub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
   return vasub_vv_i32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasub.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -364,7 +328,6 @@ vint32m8_t test_vasub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
   return vasub_vx_i32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -374,7 +337,6 @@ vint64m1_t test_vasub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
   return vasub_vv_i64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasub.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -384,7 +346,6 @@ vint64m1_t test_vasub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
   return vasub_vx_i64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -394,7 +355,6 @@ vint64m2_t test_vasub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
   return vasub_vv_i64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasub.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -404,7 +364,6 @@ vint64m2_t test_vasub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
   return vasub_vx_i64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -414,7 +373,6 @@ vint64m4_t test_vasub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
   return vasub_vv_i64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasub.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -424,7 +382,6 @@ vint64m4_t test_vasub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
   return vasub_vx_i64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -434,7 +391,6 @@ vint64m8_t test_vasub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
   return vasub_vv_i64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasub.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -444,7 +400,6 @@ vint64m8_t test_vasub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
   return vasub_vx_i64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -454,7 +409,6 @@ vuint8mf8_t test_vasubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
   return vasubu_vv_u8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -464,7 +418,6 @@ vuint8mf8_t test_vasubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
   return vasubu_vx_u8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasubu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -474,7 +427,6 @@ vuint8mf4_t test_vasubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
   return vasubu_vv_u8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasubu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -484,7 +436,6 @@ vuint8mf4_t test_vasubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
   return vasubu_vx_u8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasubu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -494,7 +445,6 @@ vuint8mf2_t test_vasubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
   return vasubu_vv_u8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasubu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -504,7 +454,6 @@ vuint8mf2_t test_vasubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
   return vasubu_vx_u8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasubu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -514,7 +463,6 @@ vuint8m1_t test_vasubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vasubu_vv_u8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasubu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -524,7 +472,6 @@ vuint8m1_t test_vasubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
   return vasubu_vx_u8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasubu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -534,7 +481,6 @@ vuint8m2_t test_vasubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vasubu_vv_u8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasubu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -544,7 +490,6 @@ vuint8m2_t test_vasubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
   return vasubu_vx_u8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasubu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -554,7 +499,6 @@ vuint8m4_t test_vasubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vasubu_vv_u8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasubu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -564,7 +508,6 @@ vuint8m4_t test_vasubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
   return vasubu_vx_u8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasubu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -574,7 +517,6 @@ vuint8m8_t test_vasubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
   return vasubu_vv_u8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasubu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -584,7 +526,6 @@ vuint8m8_t test_vasubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
   return vasubu_vx_u8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasubu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -595,7 +536,6 @@ vuint16mf4_t test_vasubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
   return vasubu_vv_u16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasubu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -605,7 +545,6 @@ vuint16mf4_t test_vasubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
   return vasubu_vx_u16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasubu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -616,7 +555,6 @@ vuint16mf2_t test_vasubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
   return vasubu_vv_u16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasubu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -626,7 +564,6 @@ vuint16mf2_t test_vasubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
   return vasubu_vx_u16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasubu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -636,7 +573,6 @@ vuint16m1_t test_vasubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
   return vasubu_vv_u16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasubu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -646,7 +582,6 @@ vuint16m1_t test_vasubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
   return vasubu_vx_u16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasubu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -656,7 +591,6 @@ vuint16m2_t test_vasubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vasubu_vv_u16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasubu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -666,7 +600,6 @@ vuint16m2_t test_vasubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
   return vasubu_vx_u16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasubu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -676,7 +609,6 @@ vuint16m4_t test_vasubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vasubu_vv_u16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasubu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -686,7 +618,6 @@ vuint16m4_t test_vasubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
   return vasubu_vx_u16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasubu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -696,7 +627,6 @@ vuint16m8_t test_vasubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return vasubu_vv_u16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasubu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -706,7 +636,6 @@ vuint16m8_t test_vasubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
   return vasubu_vx_u16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -717,7 +646,6 @@ vuint32mf2_t test_vasubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
   return vasubu_vv_u32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -727,7 +655,6 @@ vuint32mf2_t test_vasubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
   return vasubu_vx_u32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasubu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -737,7 +664,6 @@ vuint32m1_t test_vasubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
   return vasubu_vv_u32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasubu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -747,7 +673,6 @@ vuint32m1_t test_vasubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
   return vasubu_vx_u32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasubu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -757,7 +682,6 @@ vuint32m2_t test_vasubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
   return vasubu_vv_u32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasubu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -767,7 +691,6 @@ vuint32m2_t test_vasubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vasubu_vx_u32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasubu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -777,7 +700,6 @@ vuint32m4_t test_vasubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vasubu_vv_u32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasubu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -787,7 +709,6 @@ vuint32m4_t test_vasubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vasubu_vx_u32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasubu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -797,7 +718,6 @@ vuint32m8_t test_vasubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
   return vasubu_vv_u32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasubu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -807,7 +727,6 @@ vuint32m8_t test_vasubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
   return vasubu_vx_u32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasubu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -817,7 +736,6 @@ vuint64m1_t test_vasubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
   return vasubu_vv_u64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasubu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -827,7 +745,6 @@ vuint64m1_t test_vasubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
   return vasubu_vx_u64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasubu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -837,7 +754,6 @@ vuint64m2_t test_vasubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
   return vasubu_vv_u64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasubu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -847,7 +763,6 @@ vuint64m2_t test_vasubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
   return vasubu_vx_u64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasubu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -857,7 +772,6 @@ vuint64m4_t test_vasubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
   return vasubu_vv_u64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasubu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -867,7 +781,6 @@ vuint64m4_t test_vasubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
   return vasubu_vx_u64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasubu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -877,7 +790,6 @@ vuint64m8_t test_vasubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
   return vasubu_vv_u64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasubu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -887,7 +799,6 @@ vuint64m8_t test_vasubu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
   return vasubu_vx_u64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasub.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -898,7 +809,6 @@ vint8mf8_t test_vasub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
   return vasub_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasub.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -909,7 +819,6 @@ vint8mf8_t test_vasub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
   return vasub_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasub.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -920,7 +829,6 @@ vint8mf4_t test_vasub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
   return vasub_vv_i8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasub.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -931,7 +839,6 @@ vint8mf4_t test_vasub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
   return vasub_vx_i8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasub.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -942,7 +849,6 @@ vint8mf2_t test_vasub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
   return vasub_vv_i8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasub.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -953,7 +859,6 @@ vint8mf2_t test_vasub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
   return vasub_vx_i8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasub.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -964,7 +869,6 @@ vint8m1_t test_vasub_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
   return vasub_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasub.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -975,7 +879,6 @@ vint8m1_t test_vasub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
   return vasub_vx_i8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasub.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -986,7 +889,6 @@ vint8m2_t test_vasub_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
   return vasub_vv_i8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasub.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -997,7 +899,6 @@ vint8m2_t test_vasub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
   return vasub_vx_i8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasub.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1008,7 +909,6 @@ vint8m4_t test_vasub_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
   return vasub_vv_i8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasub.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1019,7 +919,6 @@ vint8m4_t test_vasub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
   return vasub_vx_i8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasub.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1030,7 +929,6 @@ vint8m8_t test_vasub_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
   return vasub_vv_i8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasub.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1041,7 +939,6 @@ vint8m8_t test_vasub_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
   return vasub_vx_i8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasub.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1053,7 +950,6 @@ vint16mf4_t test_vasub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
   return vasub_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasub.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1064,7 +960,6 @@ vint16mf4_t test_vasub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
   return vasub_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasub.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1076,7 +971,6 @@ vint16mf2_t test_vasub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
   return vasub_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasub.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1087,7 +981,6 @@ vint16mf2_t test_vasub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
   return vasub_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasub.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1098,7 +991,6 @@ vint16m1_t test_vasub_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
   return vasub_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasub.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1109,7 +1001,6 @@ vint16m1_t test_vasub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
   return vasub_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasub.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1120,7 +1011,6 @@ vint16m2_t test_vasub_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
   return vasub_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasub.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1131,7 +1021,6 @@ vint16m2_t test_vasub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
   return vasub_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasub.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1142,7 +1031,6 @@ vint16m4_t test_vasub_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
   return vasub_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasub.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1153,7 +1041,6 @@ vint16m4_t test_vasub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
   return vasub_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasub.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1164,7 +1051,6 @@ vint16m8_t test_vasub_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
   return vasub_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasub.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1175,7 +1061,6 @@ vint16m8_t test_vasub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
   return vasub_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1187,7 +1072,6 @@ vint32mf2_t test_vasub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
   return vasub_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1198,7 +1082,6 @@ vint32mf2_t test_vasub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
   return vasub_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasub.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1209,7 +1092,6 @@ vint32m1_t test_vasub_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
   return vasub_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasub.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1220,7 +1102,6 @@ vint32m1_t test_vasub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
   return vasub_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasub.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1231,7 +1112,6 @@ vint32m2_t test_vasub_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
   return vasub_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasub.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1242,7 +1122,6 @@ vint32m2_t test_vasub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
   return vasub_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasub.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1253,7 +1132,6 @@ vint32m4_t test_vasub_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
   return vasub_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasub.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1264,7 +1142,6 @@ vint32m4_t test_vasub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
   return vasub_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasub.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1275,7 +1152,6 @@ vint32m8_t test_vasub_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
   return vasub_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasub.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1286,7 +1162,6 @@ vint32m8_t test_vasub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
   return vasub_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasub.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1297,7 +1172,6 @@ vint64m1_t test_vasub_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
   return vasub_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasub.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1308,7 +1182,6 @@ vint64m1_t test_vasub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
   return vasub_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasub.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1319,7 +1192,6 @@ vint64m2_t test_vasub_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
   return vasub_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasub.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1330,7 +1202,6 @@ vint64m2_t test_vasub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
   return vasub_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasub.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1341,7 +1212,6 @@ vint64m4_t test_vasub_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
   return vasub_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasub.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1352,7 +1222,6 @@ vint64m4_t test_vasub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
   return vasub_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasub.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1363,7 +1232,6 @@ vint64m8_t test_vasub_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
   return vasub_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasub.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1374,7 +1242,6 @@ vint64m8_t test_vasub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
   return vasub_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasubu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1386,7 +1253,6 @@ vuint8mf8_t test_vasubu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
   return vasubu_vv_u8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasubu.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1397,7 +1263,6 @@ vuint8mf8_t test_vasubu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
   return vasubu_vx_u8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasubu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1409,7 +1274,6 @@ vuint8mf4_t test_vasubu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
   return vasubu_vv_u8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasubu.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1420,7 +1284,6 @@ vuint8mf4_t test_vasubu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
   return vasubu_vx_u8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasubu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1432,7 +1295,6 @@ vuint8mf2_t test_vasubu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
   return vasubu_vv_u8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasubu.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1443,7 +1305,6 @@ vuint8mf2_t test_vasubu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
   return vasubu_vx_u8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasubu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1454,7 +1315,6 @@ vuint8m1_t test_vasubu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
   return vasubu_vv_u8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasubu.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1465,7 +1325,6 @@ vuint8m1_t test_vasubu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
   return vasubu_vx_u8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasubu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1476,7 +1335,6 @@ vuint8m2_t test_vasubu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
   return vasubu_vv_u8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasubu.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1487,7 +1345,6 @@ vuint8m2_t test_vasubu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
   return vasubu_vx_u8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasubu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1498,7 +1355,6 @@ vuint8m4_t test_vasubu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
   return vasubu_vv_u8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasubu.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1509,7 +1365,6 @@ vuint8m4_t test_vasubu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
   return vasubu_vx_u8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasubu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1520,7 +1375,6 @@ vuint8m8_t test_vasubu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
   return vasubu_vv_u8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasubu.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1531,7 +1385,6 @@ vuint8m8_t test_vasubu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
   return vasubu_vx_u8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasubu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1543,7 +1396,6 @@ vuint16mf4_t test_vasubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
   return vasubu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasubu.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1555,7 +1407,6 @@ vuint16mf4_t test_vasubu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
   return vasubu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasubu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1567,7 +1418,6 @@ vuint16mf2_t test_vasubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
   return vasubu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasubu.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1579,7 +1429,6 @@ vuint16mf2_t test_vasubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
   return vasubu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasubu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1591,7 +1440,6 @@ vuint16m1_t test_vasubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
   return vasubu_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasubu.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1602,7 +1450,6 @@ vuint16m1_t test_vasubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
   return vasubu_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasubu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1614,7 +1461,6 @@ vuint16m2_t test_vasubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
   return vasubu_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasubu.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1625,7 +1471,6 @@ vuint16m2_t test_vasubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
   return vasubu_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasubu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1637,7 +1482,6 @@ vuint16m4_t test_vasubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
   return vasubu_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasubu.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1648,7 +1492,6 @@ vuint16m4_t test_vasubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
   return vasubu_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasubu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1660,7 +1503,6 @@ vuint16m8_t test_vasubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
   return vasubu_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasubu.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1671,7 +1513,6 @@ vuint16m8_t test_vasubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
   return vasubu_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1683,7 +1524,6 @@ vuint32mf2_t test_vasubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
   return vasubu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1695,7 +1535,6 @@ vuint32mf2_t test_vasubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
   return vasubu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasubu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1707,7 +1546,6 @@ vuint32m1_t test_vasubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
   return vasubu_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasubu.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1718,7 +1556,6 @@ vuint32m1_t test_vasubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
   return vasubu_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasubu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1730,7 +1567,6 @@ vuint32m2_t test_vasubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
   return vasubu_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasubu.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1741,7 +1577,6 @@ vuint32m2_t test_vasubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
   return vasubu_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasubu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1753,7 +1588,6 @@ vuint32m4_t test_vasubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
   return vasubu_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasubu.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1764,7 +1598,6 @@ vuint32m4_t test_vasubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
   return vasubu_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasubu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1776,7 +1609,6 @@ vuint32m8_t test_vasubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
   return vasubu_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasubu.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1787,7 +1619,6 @@ vuint32m8_t test_vasubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
   return vasubu_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasubu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1799,7 +1630,6 @@ vuint64m1_t test_vasubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
   return vasubu_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasubu.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1810,7 +1640,6 @@ vuint64m1_t test_vasubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
   return vasubu_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasubu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1822,7 +1651,6 @@ vuint64m2_t test_vasubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
   return vasubu_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasubu.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1833,7 +1661,6 @@ vuint64m2_t test_vasubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
   return vasubu_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasubu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1845,7 +1672,6 @@ vuint64m4_t test_vasubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
   return vasubu_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasubu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1856,7 +1682,6 @@ vuint64m4_t test_vasubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
   return vasubu_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasubu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1868,7 +1693,6 @@ vuint64m8_t test_vasubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
   return vasubu_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasubu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vdiv.c
index b280d71fbdd99..19b99bae5ec71 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vdiv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vdiv.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint8mf8_t test_vdiv_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
   return vdiv_vv_i8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint8mf8_t test_vdiv_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
   return vdiv_vx_i8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint8mf4_t test_vdiv_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
   return vdiv_vv_i8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint8mf4_t test_vdiv_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
   return vdiv_vx_i8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint8mf2_t test_vdiv_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
   return vdiv_vv_i8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint8mf2_t test_vdiv_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
   return vdiv_vx_i8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vint8m1_t test_vdiv_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
   return vdiv_vv_i8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vint8m1_t test_vdiv_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
   return vdiv_vx_i8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vint8m2_t test_vdiv_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
   return vdiv_vv_i8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vint8m2_t test_vdiv_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
   return vdiv_vx_i8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vint8m4_t test_vdiv_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
   return vdiv_vv_i8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vint8m4_t test_vdiv_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
   return vdiv_vx_i8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vint8m8_t test_vdiv_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
   return vdiv_vv_i8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vint8m8_t test_vdiv_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
   return vdiv_vx_i8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vint16mf4_t test_vdiv_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
   return vdiv_vv_i16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vint16mf4_t test_vdiv_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
   return vdiv_vx_i16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vint16mf2_t test_vdiv_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
   return vdiv_vv_i16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vint16mf2_t test_vdiv_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
   return vdiv_vx_i16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vint16m1_t test_vdiv_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
   return vdiv_vv_i16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vint16m1_t test_vdiv_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
   return vdiv_vx_i16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vint16m2_t test_vdiv_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
   return vdiv_vv_i16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vint16m2_t test_vdiv_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
   return vdiv_vx_i16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vint16m4_t test_vdiv_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
   return vdiv_vv_i16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vint16m4_t test_vdiv_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
   return vdiv_vx_i16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vint16m8_t test_vdiv_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
   return vdiv_vv_i16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vint16m8_t test_vdiv_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
   return vdiv_vx_i16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vint32mf2_t test_vdiv_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
   return vdiv_vv_i32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vint32mf2_t test_vdiv_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
   return vdiv_vx_i32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vint32m1_t test_vdiv_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
   return vdiv_vv_i32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -304,7 +274,6 @@ vint32m1_t test_vdiv_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
   return vdiv_vx_i32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -314,7 +283,6 @@ vint32m2_t test_vdiv_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
   return vdiv_vv_i32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -324,7 +292,6 @@ vint32m2_t test_vdiv_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
   return vdiv_vx_i32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -334,7 +301,6 @@ vint32m4_t test_vdiv_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
   return vdiv_vv_i32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -344,7 +310,6 @@ vint32m4_t test_vdiv_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
   return vdiv_vx_i32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -354,7 +319,6 @@ vint32m8_t test_vdiv_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
   return vdiv_vv_i32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -364,7 +328,6 @@ vint32m8_t test_vdiv_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
   return vdiv_vx_i32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -374,7 +337,6 @@ vint64m1_t test_vdiv_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
   return vdiv_vv_i64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -384,7 +346,6 @@ vint64m1_t test_vdiv_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
   return vdiv_vx_i64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -394,7 +355,6 @@ vint64m2_t test_vdiv_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
   return vdiv_vv_i64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -404,7 +364,6 @@ vint64m2_t test_vdiv_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
   return vdiv_vx_i64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -414,7 +373,6 @@ vint64m4_t test_vdiv_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
   return vdiv_vv_i64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -424,7 +382,6 @@ vint64m4_t test_vdiv_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
   return vdiv_vx_i64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -434,7 +391,6 @@ vint64m8_t test_vdiv_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
   return vdiv_vv_i64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -444,7 +400,6 @@ vint64m8_t test_vdiv_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
   return vdiv_vx_i64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -454,7 +409,6 @@ vuint8mf8_t test_vdivu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
   return vdivu_vv_u8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -464,7 +418,6 @@ vuint8mf8_t test_vdivu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
   return vdivu_vx_u8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -474,7 +427,6 @@ vuint8mf4_t test_vdivu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
   return vdivu_vv_u8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -484,7 +436,6 @@ vuint8mf4_t test_vdivu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
   return vdivu_vx_u8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -494,7 +445,6 @@ vuint8mf2_t test_vdivu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
   return vdivu_vv_u8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -504,7 +454,6 @@ vuint8mf2_t test_vdivu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
   return vdivu_vx_u8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -514,7 +463,6 @@ vuint8m1_t test_vdivu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vdivu_vv_u8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -524,7 +472,6 @@ vuint8m1_t test_vdivu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
   return vdivu_vx_u8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -534,7 +481,6 @@ vuint8m2_t test_vdivu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vdivu_vv_u8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -544,7 +490,6 @@ vuint8m2_t test_vdivu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
   return vdivu_vx_u8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -554,7 +499,6 @@ vuint8m4_t test_vdivu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vdivu_vv_u8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -564,7 +508,6 @@ vuint8m4_t test_vdivu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
   return vdivu_vx_u8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -574,7 +517,6 @@ vuint8m8_t test_vdivu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
   return vdivu_vv_u8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -584,7 +526,6 @@ vuint8m8_t test_vdivu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
   return vdivu_vx_u8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -594,7 +535,6 @@ vuint16mf4_t test_vdivu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
   return vdivu_vv_u16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -604,7 +544,6 @@ vuint16mf4_t test_vdivu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
   return vdivu_vx_u16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -614,7 +553,6 @@ vuint16mf2_t test_vdivu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
   return vdivu_vv_u16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -624,7 +562,6 @@ vuint16mf2_t test_vdivu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
   return vdivu_vx_u16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -634,7 +571,6 @@ vuint16m1_t test_vdivu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
   return vdivu_vv_u16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -644,7 +580,6 @@ vuint16m1_t test_vdivu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
   return vdivu_vx_u16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -654,7 +589,6 @@ vuint16m2_t test_vdivu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vdivu_vv_u16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -664,7 +598,6 @@ vuint16m2_t test_vdivu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
   return vdivu_vx_u16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -674,7 +607,6 @@ vuint16m4_t test_vdivu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vdivu_vv_u16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -684,7 +616,6 @@ vuint16m4_t test_vdivu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
   return vdivu_vx_u16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -694,7 +625,6 @@ vuint16m8_t test_vdivu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return vdivu_vv_u16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -704,7 +634,6 @@ vuint16m8_t test_vdivu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
   return vdivu_vx_u16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -714,7 +643,6 @@ vuint32mf2_t test_vdivu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
   return vdivu_vv_u32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -724,7 +652,6 @@ vuint32mf2_t test_vdivu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
   return vdivu_vx_u32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -734,7 +661,6 @@ vuint32m1_t test_vdivu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
   return vdivu_vv_u32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -744,7 +670,6 @@ vuint32m1_t test_vdivu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
   return vdivu_vx_u32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -754,7 +679,6 @@ vuint32m2_t test_vdivu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
   return vdivu_vv_u32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -764,7 +688,6 @@ vuint32m2_t test_vdivu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vdivu_vx_u32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -774,7 +697,6 @@ vuint32m4_t test_vdivu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vdivu_vv_u32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -784,7 +706,6 @@ vuint32m4_t test_vdivu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vdivu_vx_u32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -794,7 +715,6 @@ vuint32m8_t test_vdivu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
   return vdivu_vv_u32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -804,7 +724,6 @@ vuint32m8_t test_vdivu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
   return vdivu_vx_u32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -814,7 +733,6 @@ vuint64m1_t test_vdivu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
   return vdivu_vv_u64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -824,7 +742,6 @@ vuint64m1_t test_vdivu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
   return vdivu_vx_u64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -834,7 +751,6 @@ vuint64m2_t test_vdivu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
   return vdivu_vv_u64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -844,7 +760,6 @@ vuint64m2_t test_vdivu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
   return vdivu_vx_u64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -854,7 +769,6 @@ vuint64m4_t test_vdivu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
   return vdivu_vv_u64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -864,7 +778,6 @@ vuint64m4_t test_vdivu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
   return vdivu_vx_u64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -874,7 +787,6 @@ vuint64m8_t test_vdivu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
   return vdivu_vv_u64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -884,7 +796,6 @@ vuint64m8_t test_vdivu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
   return vdivu_vx_u64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -894,7 +805,6 @@ vint8mf8_t test_vdiv_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t
   return vdiv_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -904,7 +814,6 @@ vint8mf8_t test_vdiv_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t
   return vdiv_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -914,7 +823,6 @@ vint8mf4_t test_vdiv_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t
   return vdiv_vv_i8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -924,7 +832,6 @@ vint8mf4_t test_vdiv_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t
   return vdiv_vx_i8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -934,7 +841,6 @@ vint8mf2_t test_vdiv_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t
   return vdiv_vv_i8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -944,7 +850,6 @@ vint8mf2_t test_vdiv_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t
   return vdiv_vx_i8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -954,7 +859,6 @@ vint8m1_t test_vdiv_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1,
   return vdiv_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -964,7 +868,6 @@ vint8m1_t test_vdiv_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1,
   return vdiv_vx_i8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -974,7 +877,6 @@ vint8m2_t test_vdiv_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1,
   return vdiv_vv_i8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -984,7 +886,6 @@ vint8m2_t test_vdiv_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1,
   return vdiv_vx_i8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -994,7 +895,6 @@ vint8m4_t test_vdiv_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1,
   return vdiv_vv_i8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1004,7 +904,6 @@ vint8m4_t test_vdiv_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1,
   return vdiv_vx_i8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1014,7 +913,6 @@ vint8m8_t test_vdiv_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1,
   return vdiv_vv_i8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1024,7 +922,6 @@ vint8m8_t test_vdiv_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1,
   return vdiv_vx_i8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1034,7 +931,6 @@ vint16mf4_t test_vdiv_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16m
   return vdiv_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1044,7 +940,6 @@ vint16mf4_t test_vdiv_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16m
   return vdiv_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1054,7 +949,6 @@ vint16mf2_t test_vdiv_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16m
   return vdiv_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1064,7 +958,6 @@ vint16mf2_t test_vdiv_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16m
   return vdiv_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1074,7 +967,6 @@ vint16m1_t test_vdiv_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t
   return vdiv_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1084,7 +976,6 @@ vint16m1_t test_vdiv_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t
   return vdiv_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1094,7 +985,6 @@ vint16m2_t test_vdiv_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
   return vdiv_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1104,7 +994,6 @@ vint16m2_t test_vdiv_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
   return vdiv_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdiv.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1114,7 +1003,6 @@ vint16m4_t test_vdiv_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
   return vdiv_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdiv.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1124,7 +1012,6 @@ vint16m4_t test_vdiv_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
   return vdiv_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdiv.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1134,7 +1021,6 @@ vint16m8_t test_vdiv_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
   return vdiv_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdiv.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1144,7 +1030,6 @@ vint16m8_t test_vdiv_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
   return vdiv_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1154,7 +1039,6 @@ vint32mf2_t test_vdiv_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32m
   return vdiv_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1164,7 +1048,6 @@ vint32mf2_t test_vdiv_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32m
   return vdiv_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdiv.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1174,7 +1057,6 @@ vint32m1_t test_vdiv_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t
   return vdiv_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdiv.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1184,7 +1066,6 @@ vint32m1_t test_vdiv_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t
   return vdiv_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdiv.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1194,7 +1075,6 @@ vint32m2_t test_vdiv_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t
   return vdiv_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdiv.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1204,7 +1084,6 @@ vint32m2_t test_vdiv_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t
   return vdiv_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdiv.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1214,7 +1093,6 @@ vint32m4_t test_vdiv_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
   return vdiv_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdiv.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1224,7 +1102,6 @@ vint32m4_t test_vdiv_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
   return vdiv_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1234,7 +1111,6 @@ vint32m8_t test_vdiv_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
   return vdiv_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdiv.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1244,7 +1120,6 @@ vint32m8_t test_vdiv_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
   return vdiv_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdiv.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1254,7 +1129,6 @@ vint64m1_t test_vdiv_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t
   return vdiv_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdiv.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1264,7 +1138,6 @@ vint64m1_t test_vdiv_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t
   return vdiv_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdiv.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1274,7 +1147,6 @@ vint64m2_t test_vdiv_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t
   return vdiv_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdiv.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1284,7 +1156,6 @@ vint64m2_t test_vdiv_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t
   return vdiv_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdiv.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1294,7 +1165,6 @@ vint64m4_t test_vdiv_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t
   return vdiv_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdiv.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1304,7 +1174,6 @@ vint64m4_t test_vdiv_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t
   return vdiv_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdiv.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1314,7 +1183,6 @@ vint64m8_t test_vdiv_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
   return vdiv_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdiv.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1324,7 +1192,6 @@ vint64m8_t test_vdiv_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
   return vdiv_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdivu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1334,7 +1201,6 @@ vuint8mf8_t test_vdivu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8m
   return vdivu_vv_u8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdivu.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1344,7 +1210,6 @@ vuint8mf8_t test_vdivu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8m
   return vdivu_vx_u8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdivu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1354,7 +1219,6 @@ vuint8mf4_t test_vdivu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8m
   return vdivu_vv_u8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdivu.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1364,7 +1228,6 @@ vuint8mf4_t test_vdivu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8m
   return vdivu_vx_u8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdivu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1374,7 +1237,6 @@ vuint8mf2_t test_vdivu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8m
   return vdivu_vv_u8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdivu.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1384,7 +1246,6 @@ vuint8mf2_t test_vdivu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8m
   return vdivu_vx_u8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdivu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1394,7 +1255,6 @@ vuint8m1_t test_vdivu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
   return vdivu_vv_u8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdivu.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1404,7 +1264,6 @@ vuint8m1_t test_vdivu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
   return vdivu_vx_u8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdivu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1414,7 +1273,6 @@ vuint8m2_t test_vdivu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
   return vdivu_vv_u8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdivu.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1424,7 +1282,6 @@ vuint8m2_t test_vdivu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
   return vdivu_vx_u8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdivu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1434,7 +1291,6 @@ vuint8m4_t test_vdivu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
   return vdivu_vv_u8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdivu.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1444,7 +1300,6 @@ vuint8m4_t test_vdivu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
   return vdivu_vx_u8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1454,7 +1309,6 @@ vuint8m8_t test_vdivu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
   return vdivu_vv_u8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdivu.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1464,7 +1318,6 @@ vuint8m8_t test_vdivu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
   return vdivu_vx_u8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdivu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1474,7 +1327,6 @@ vuint16mf4_t test_vdivu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuin
   return vdivu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdivu.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1484,7 +1336,6 @@ vuint16mf4_t test_vdivu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuin
   return vdivu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdivu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1494,7 +1345,6 @@ vuint16mf2_t test_vdivu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuin
   return vdivu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdivu.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1504,7 +1354,6 @@ vuint16mf2_t test_vdivu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuin
   return vdivu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdivu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1514,7 +1363,6 @@ vuint16m1_t test_vdivu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16
   return vdivu_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdivu.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1524,7 +1372,6 @@ vuint16m1_t test_vdivu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16
   return vdivu_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdivu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1534,7 +1381,6 @@ vuint16m2_t test_vdivu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m
   return vdivu_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdivu.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1544,7 +1390,6 @@ vuint16m2_t test_vdivu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m
   return vdivu_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdivu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1554,7 +1399,6 @@ vuint16m4_t test_vdivu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m
   return vdivu_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdivu.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1564,7 +1408,6 @@ vuint16m4_t test_vdivu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m
   return vdivu_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdivu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1574,7 +1417,6 @@ vuint16m8_t test_vdivu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m
   return vdivu_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdivu.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1584,7 +1426,6 @@ vuint16m8_t test_vdivu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m
   return vdivu_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1594,7 +1435,6 @@ vuint32mf2_t test_vdivu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuin
   return vdivu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1604,7 +1444,6 @@ vuint32mf2_t test_vdivu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuin
   return vdivu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdivu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1614,7 +1453,6 @@ vuint32m1_t test_vdivu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32
   return vdivu_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdivu.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1624,7 +1462,6 @@ vuint32m1_t test_vdivu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32
   return vdivu_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdivu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1634,7 +1471,6 @@ vuint32m2_t test_vdivu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32
   return vdivu_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdivu.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1644,7 +1480,6 @@ vuint32m2_t test_vdivu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32
   return vdivu_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdivu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1654,7 +1489,6 @@ vuint32m4_t test_vdivu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m
   return vdivu_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdivu.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1664,7 +1498,6 @@ vuint32m4_t test_vdivu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m
   return vdivu_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdivu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1674,7 +1507,6 @@ vuint32m8_t test_vdivu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m
   return vdivu_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdivu.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1684,7 +1516,6 @@ vuint32m8_t test_vdivu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m
   return vdivu_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdivu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1694,7 +1525,6 @@ vuint64m1_t test_vdivu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64
   return vdivu_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdivu.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1704,7 +1534,6 @@ vuint64m1_t test_vdivu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64
   return vdivu_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1714,7 +1543,6 @@ vuint64m2_t test_vdivu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64
   return vdivu_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1724,7 +1552,6 @@ vuint64m2_t test_vdivu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64
   return vdivu_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1734,7 +1561,6 @@ vuint64m4_t test_vdivu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64
   return vdivu_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1744,7 +1570,6 @@ vuint64m4_t test_vdivu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64
   return vdivu_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1754,7 +1579,6 @@ vuint64m8_t test_vdivu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m
   return vdivu_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfabs.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfabs.c
index 355569f8ab19d..4fa3b2a73f5ef 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfabs.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfabs.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfabs_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vfloat32mf2_t test_vfabs_v_f32mf2 (vfloat32mf2_t op1, size_t vl) {
   return vfabs_v_f32mf2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfabs_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP1]], i64 [[VL:%.*]])
@@ -25,7 +23,6 @@ vfloat32m1_t test_vfabs_v_f32m1 (vfloat32m1_t op1, size_t vl) {
   return vfabs_v_f32m1(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfabs_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP1]], i64 [[VL:%.*]])
@@ -35,7 +32,6 @@ vfloat32m2_t test_vfabs_v_f32m2 (vfloat32m2_t op1, size_t vl) {
   return vfabs_v_f32m2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfabs_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP1]], i64 [[VL:%.*]])
@@ -45,7 +41,6 @@ vfloat32m4_t test_vfabs_v_f32m4 (vfloat32m4_t op1, size_t vl) {
   return vfabs_v_f32m4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfabs_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP1]], i64 [[VL:%.*]])
@@ -55,7 +50,6 @@ vfloat32m8_t test_vfabs_v_f32m8 (vfloat32m8_t op1, size_t vl) {
   return vfabs_v_f32m8(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfabs_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP1]], i64 [[VL:%.*]])
@@ -65,7 +59,6 @@ vfloat64m1_t test_vfabs_v_f64m1 (vfloat64m1_t op1, size_t vl) {
   return vfabs_v_f64m1(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfabs_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP1]], i64 [[VL:%.*]])
@@ -75,7 +68,6 @@ vfloat64m2_t test_vfabs_v_f64m2 (vfloat64m2_t op1, size_t vl) {
   return vfabs_v_f64m2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfabs_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP1]], i64 [[VL:%.*]])
@@ -85,7 +77,6 @@ vfloat64m4_t test_vfabs_v_f64m4 (vfloat64m4_t op1, size_t vl) {
   return vfabs_v_f64m4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfabs_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP1]], i64 [[VL:%.*]])
@@ -95,7 +86,6 @@ vfloat64m8_t test_vfabs_v_f64m8 (vfloat64m8_t op1, size_t vl) {
   return vfabs_v_f64m8(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfabs_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -105,7 +95,6 @@ vfloat32mf2_t test_vfabs_v_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vf
   return vfabs_v_f32mf2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfabs_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP1]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -115,7 +104,6 @@ vfloat32m1_t test_vfabs_v_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloa
   return vfabs_v_f32m1_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfabs_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP1]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -125,7 +113,6 @@ vfloat32m2_t test_vfabs_v_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloa
   return vfabs_v_f32m2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfabs_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP1]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -135,7 +122,6 @@ vfloat32m4_t test_vfabs_v_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat
   return vfabs_v_f32m4_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfabs_v_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP1]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -145,7 +131,6 @@ vfloat32m8_t test_vfabs_v_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat
   return vfabs_v_f32m8_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfabs_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP1]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -155,7 +140,6 @@ vfloat64m1_t test_vfabs_v_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloa
   return vfabs_v_f64m1_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfabs_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP1]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -165,7 +149,6 @@ vfloat64m2_t test_vfabs_v_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloa
   return vfabs_v_f64m2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfabs_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP1]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -175,7 +158,6 @@ vfloat64m4_t test_vfabs_v_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vfloa
   return vfabs_v_f64m4_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfabs_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP1]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfadd.c
index e1f42f94ecc9c..1c993d814d4c5 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfadd.c
@@ -10,6 +10,7 @@
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
 vfloat16mf4_t test_vfadd_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
   return vfadd_vv_f16mf4(op1, op2, vl);
 }
@@ -18,6 +19,7 @@ vfloat16mf4_t test_vfadd_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16.i64(<vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
 vfloat16mf4_t test_vfadd_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
   return vfadd_vf_f16mf4(op1, op2, vl);
 }
@@ -26,6 +28,7 @@ vfloat16mf4_t test_vfadd_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl)
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
 vfloat16mf2_t test_vfadd_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
   return vfadd_vv_f16mf2(op1, op2, vl);
 }
@@ -34,6 +37,7 @@ vfloat16mf2_t test_vfadd_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.f16.i64(<vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
 vfloat16mf2_t test_vfadd_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
   return vfadd_vf_f16mf2(op1, op2, vl);
 }
@@ -42,6 +46,7 @@ vfloat16mf2_t test_vfadd_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl)
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
 vfloat16m1_t test_vfadd_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
   return vfadd_vv_f16m1(op1, op2, vl);
 }
@@ -50,6 +55,7 @@ vfloat16m1_t test_vfadd_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl)
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.f16.i64(<vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
 vfloat16m1_t test_vfadd_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
   return vfadd_vf_f16m1(op1, op2, vl);
 }
@@ -58,6 +64,7 @@ vfloat16m1_t test_vfadd_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
 vfloat16m2_t test_vfadd_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
   return vfadd_vv_f16m2(op1, op2, vl);
 }
@@ -66,6 +73,7 @@ vfloat16m2_t test_vfadd_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl)
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.f16.i64(<vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
 vfloat16m2_t test_vfadd_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
   return vfadd_vf_f16m2(op1, op2, vl);
 }
@@ -74,6 +82,7 @@ vfloat16m2_t test_vfadd_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
 vfloat16m4_t test_vfadd_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
   return vfadd_vv_f16m4(op1, op2, vl);
 }
@@ -82,6 +91,7 @@ vfloat16m4_t test_vfadd_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl)
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.f16.i64(<vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
 vfloat16m4_t test_vfadd_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
   return vfadd_vf_f16m4(op1, op2, vl);
 }
@@ -90,6 +100,7 @@ vfloat16m4_t test_vfadd_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
 vfloat16m8_t test_vfadd_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
   return vfadd_vv_f16m8(op1, op2, vl);
 }
@@ -98,6 +109,7 @@ vfloat16m8_t test_vfadd_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl)
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.f16.i64(<vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
 vfloat16m8_t test_vfadd_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) {
   return vfadd_vf_f16m8(op1, op2, vl);
 }
@@ -106,6 +118,7 @@ vfloat16m8_t test_vfadd_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
 vfloat32mf2_t test_vfadd_vv_f32mf2 (vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
   return vfadd_vv_f32mf2(op1, op2, vl);
 }
@@ -114,6 +127,7 @@ vfloat32mf2_t test_vfadd_vv_f32mf2 (vfloat32mf2_t op1, vfloat32mf2_t op2, size_t
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
 vfloat32mf2_t test_vfadd_vf_f32mf2 (vfloat32mf2_t op1, float op2, size_t vl) {
   return vfadd_vf_f32mf2(op1, op2, vl);
 }
@@ -122,6 +136,7 @@ vfloat32mf2_t test_vfadd_vf_f32mf2 (vfloat32mf2_t op1, float op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
 vfloat32m1_t test_vfadd_vv_f32m1 (vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
   return vfadd_vv_f32m1(op1, op2, vl);
 }
@@ -130,6 +145,7 @@ vfloat32m1_t test_vfadd_vv_f32m1 (vfloat32m1_t op1, vfloat32m1_t op2, size_t vl)
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
 vfloat32m1_t test_vfadd_vf_f32m1 (vfloat32m1_t op1, float op2, size_t vl) {
   return vfadd_vf_f32m1(op1, op2, vl);
 }
@@ -138,6 +154,7 @@ vfloat32m1_t test_vfadd_vf_f32m1 (vfloat32m1_t op1, float op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
 vfloat32m2_t test_vfadd_vv_f32m2 (vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
   return vfadd_vv_f32m2(op1, op2, vl);
 }
@@ -146,6 +163,7 @@ vfloat32m2_t test_vfadd_vv_f32m2 (vfloat32m2_t op1, vfloat32m2_t op2, size_t vl)
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
 vfloat32m2_t test_vfadd_vf_f32m2 (vfloat32m2_t op1, float op2, size_t vl) {
   return vfadd_vf_f32m2(op1, op2, vl);
 }
@@ -154,6 +172,7 @@ vfloat32m2_t test_vfadd_vf_f32m2 (vfloat32m2_t op1, float op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
 vfloat32m4_t test_vfadd_vv_f32m4 (vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
   return vfadd_vv_f32m4(op1, op2, vl);
 }
@@ -162,6 +181,7 @@ vfloat32m4_t test_vfadd_vv_f32m4 (vfloat32m4_t op1, vfloat32m4_t op2, size_t vl)
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
 vfloat32m4_t test_vfadd_vf_f32m4 (vfloat32m4_t op1, float op2, size_t vl) {
   return vfadd_vf_f32m4(op1, op2, vl);
 }
@@ -170,6 +190,7 @@ vfloat32m4_t test_vfadd_vf_f32m4 (vfloat32m4_t op1, float op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
 vfloat32m8_t test_vfadd_vv_f32m8 (vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
   return vfadd_vv_f32m8(op1, op2, vl);
 }
@@ -178,6 +199,7 @@ vfloat32m8_t test_vfadd_vv_f32m8 (vfloat32m8_t op1, vfloat32m8_t op2, size_t vl)
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
 vfloat32m8_t test_vfadd_vf_f32m8 (vfloat32m8_t op1, float op2, size_t vl) {
   return vfadd_vf_f32m8(op1, op2, vl);
 }
@@ -186,6 +208,7 @@ vfloat32m8_t test_vfadd_vf_f32m8 (vfloat32m8_t op1, float op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
 vfloat64m1_t test_vfadd_vv_f64m1 (vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
   return vfadd_vv_f64m1(op1, op2, vl);
 }
@@ -194,6 +217,7 @@ vfloat64m1_t test_vfadd_vv_f64m1 (vfloat64m1_t op1, vfloat64m1_t op2, size_t vl)
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
 vfloat64m1_t test_vfadd_vf_f64m1 (vfloat64m1_t op1, double op2, size_t vl) {
   return vfadd_vf_f64m1(op1, op2, vl);
 }
@@ -202,6 +226,7 @@ vfloat64m1_t test_vfadd_vf_f64m1 (vfloat64m1_t op1, double op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
 vfloat64m2_t test_vfadd_vv_f64m2 (vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
   return vfadd_vv_f64m2(op1, op2, vl);
 }
@@ -210,6 +235,7 @@ vfloat64m2_t test_vfadd_vv_f64m2 (vfloat64m2_t op1, vfloat64m2_t op2, size_t vl)
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
 vfloat64m2_t test_vfadd_vf_f64m2 (vfloat64m2_t op1, double op2, size_t vl) {
   return vfadd_vf_f64m2(op1, op2, vl);
 }
@@ -218,6 +244,7 @@ vfloat64m2_t test_vfadd_vf_f64m2 (vfloat64m2_t op1, double op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
 vfloat64m4_t test_vfadd_vv_f64m4 (vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
   return vfadd_vv_f64m4(op1, op2, vl);
 }
@@ -226,6 +253,7 @@ vfloat64m4_t test_vfadd_vv_f64m4 (vfloat64m4_t op1, vfloat64m4_t op2, size_t vl)
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
 vfloat64m4_t test_vfadd_vf_f64m4 (vfloat64m4_t op1, double op2, size_t vl) {
   return vfadd_vf_f64m4(op1, op2, vl);
 }
@@ -234,6 +262,7 @@ vfloat64m4_t test_vfadd_vf_f64m4 (vfloat64m4_t op1, double op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
 vfloat64m8_t test_vfadd_vv_f64m8 (vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
   return vfadd_vv_f64m8(op1, op2, vl);
 }
@@ -242,6 +271,7 @@ vfloat64m8_t test_vfadd_vv_f64m8 (vfloat64m8_t op1, vfloat64m8_t op2, size_t vl)
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
 vfloat64m8_t test_vfadd_vf_f64m8 (vfloat64m8_t op1, double op2, size_t vl) {
   return vfadd_vf_f64m8(op1, op2, vl);
 }
@@ -250,6 +280,7 @@ vfloat64m8_t test_vfadd_vf_f64m8 (vfloat64m8_t op1, double op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
 vfloat16mf4_t test_vfadd_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
   return vfadd_vv_f16mf4_m(mask, maskedoff, op1, op2, vl);
 }
@@ -258,6 +289,7 @@ vfloat16mf4_t test_vfadd_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, v
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
 vfloat16mf4_t test_vfadd_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
   return vfadd_vf_f16mf4_m(mask, maskedoff, op1, op2, vl);
 }
@@ -266,6 +298,7 @@ vfloat16mf4_t test_vfadd_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, v
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
 vfloat16mf2_t test_vfadd_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
   return vfadd_vv_f16mf2_m(mask, maskedoff, op1, op2, vl);
 }
@@ -274,6 +307,7 @@ vfloat16mf2_t test_vfadd_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, v
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
 vfloat16mf2_t test_vfadd_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
   return vfadd_vf_f16mf2_m(mask, maskedoff, op1, op2, vl);
 }
@@ -282,6 +316,7 @@ vfloat16mf2_t test_vfadd_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, v
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
 vfloat16m1_t test_vfadd_vv_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
   return vfadd_vv_f16m1_m(mask, maskedoff, op1, op2, vl);
 }
@@ -290,6 +325,7 @@ vfloat16m1_t test_vfadd_vv_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vflo
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
 vfloat16m1_t test_vfadd_vf_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
   return vfadd_vf_f16m1_m(mask, maskedoff, op1, op2, vl);
 }
@@ -298,6 +334,7 @@ vfloat16m1_t test_vfadd_vf_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vflo
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
 vfloat16m2_t test_vfadd_vv_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
   return vfadd_vv_f16m2_m(mask, maskedoff, op1, op2, vl);
 }
@@ -306,6 +343,7 @@ vfloat16m2_t test_vfadd_vv_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloa
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
 vfloat16m2_t test_vfadd_vf_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
   return vfadd_vf_f16m2_m(mask, maskedoff, op1, op2, vl);
 }
@@ -314,6 +352,7 @@ vfloat16m2_t test_vfadd_vf_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloa
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
 vfloat16m4_t test_vfadd_vv_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
   return vfadd_vv_f16m4_m(mask, maskedoff, op1, op2, vl);
 }
@@ -322,6 +361,7 @@ vfloat16m4_t test_vfadd_vv_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloa
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
 vfloat16m4_t test_vfadd_vf_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
   return vfadd_vf_f16m4_m(mask, maskedoff, op1, op2, vl);
 }
@@ -330,6 +370,7 @@ vfloat16m4_t test_vfadd_vf_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloa
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
 vfloat16m8_t test_vfadd_vv_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
   return vfadd_vv_f16m8_m(mask, maskedoff, op1, op2, vl);
 }
@@ -338,6 +379,7 @@ vfloat16m8_t test_vfadd_vv_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloa
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
 vfloat16m8_t test_vfadd_vf_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
   return vfadd_vf_f16m8_m(mask, maskedoff, op1, op2, vl);
 }
@@ -346,6 +388,7 @@ vfloat16m8_t test_vfadd_vf_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloa
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
 vfloat32mf2_t test_vfadd_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
   return vfadd_vv_f32mf2_m(mask, maskedoff, op1, op2, vl);
 }
@@ -354,6 +397,7 @@ vfloat32mf2_t test_vfadd_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, v
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
 vfloat32mf2_t test_vfadd_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
   return vfadd_vf_f32mf2_m(mask, maskedoff, op1, op2, vl);
 }
@@ -362,6 +406,7 @@ vfloat32mf2_t test_vfadd_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, v
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
 vfloat32m1_t test_vfadd_vv_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
   return vfadd_vv_f32m1_m(mask, maskedoff, op1, op2, vl);
 }
@@ -370,6 +415,7 @@ vfloat32m1_t test_vfadd_vv_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vflo
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
 vfloat32m1_t test_vfadd_vf_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
   return vfadd_vf_f32m1_m(mask, maskedoff, op1, op2, vl);
 }
@@ -378,6 +424,7 @@ vfloat32m1_t test_vfadd_vf_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vflo
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
 vfloat32m2_t test_vfadd_vv_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
   return vfadd_vv_f32m2_m(mask, maskedoff, op1, op2, vl);
 }
@@ -386,6 +433,7 @@ vfloat32m2_t test_vfadd_vv_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vflo
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
 vfloat32m2_t test_vfadd_vf_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
   return vfadd_vf_f32m2_m(mask, maskedoff, op1, op2, vl);
 }
@@ -394,6 +442,7 @@ vfloat32m2_t test_vfadd_vf_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vflo
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
 vfloat32m4_t test_vfadd_vv_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
   return vfadd_vv_f32m4_m(mask, maskedoff, op1, op2, vl);
 }
@@ -402,6 +451,7 @@ vfloat32m4_t test_vfadd_vv_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloa
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
 vfloat32m4_t test_vfadd_vf_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
   return vfadd_vf_f32m4_m(mask, maskedoff, op1, op2, vl);
 }
@@ -410,6 +460,7 @@ vfloat32m4_t test_vfadd_vf_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloa
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
 vfloat32m8_t test_vfadd_vv_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
   return vfadd_vv_f32m8_m(mask, maskedoff, op1, op2, vl);
 }
@@ -418,6 +469,7 @@ vfloat32m8_t test_vfadd_vv_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloa
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
 vfloat32m8_t test_vfadd_vf_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
   return vfadd_vf_f32m8_m(mask, maskedoff, op1, op2, vl);
 }
@@ -426,6 +478,7 @@ vfloat32m8_t test_vfadd_vf_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloa
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
 vfloat64m1_t test_vfadd_vv_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
   return vfadd_vv_f64m1_m(mask, maskedoff, op1, op2, vl);
 }
@@ -434,6 +487,7 @@ vfloat64m1_t test_vfadd_vv_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vflo
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
 vfloat64m1_t test_vfadd_vf_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
   return vfadd_vf_f64m1_m(mask, maskedoff, op1, op2, vl);
 }
@@ -442,6 +496,7 @@ vfloat64m1_t test_vfadd_vf_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vflo
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
 vfloat64m2_t test_vfadd_vv_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
   return vfadd_vv_f64m2_m(mask, maskedoff, op1, op2, vl);
 }
@@ -450,6 +505,7 @@ vfloat64m2_t test_vfadd_vv_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vflo
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
 vfloat64m2_t test_vfadd_vf_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
   return vfadd_vf_f64m2_m(mask, maskedoff, op1, op2, vl);
 }
@@ -458,6 +514,7 @@ vfloat64m2_t test_vfadd_vf_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vflo
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
 vfloat64m4_t test_vfadd_vv_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
   return vfadd_vv_f64m4_m(mask, maskedoff, op1, op2, vl);
 }
@@ -466,6 +523,7 @@ vfloat64m4_t test_vfadd_vv_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vflo
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
 vfloat64m4_t test_vfadd_vf_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
   return vfadd_vf_f64m4_m(mask, maskedoff, op1, op2, vl);
 }
@@ -474,6 +532,7 @@ vfloat64m4_t test_vfadd_vf_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vflo
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
 vfloat64m8_t test_vfadd_vv_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
   return vfadd_vv_f64m8_m(mask, maskedoff, op1, op2, vl);
 }
@@ -482,6 +541,7 @@ vfloat64m8_t test_vfadd_vv_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloa
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
 vfloat64m8_t test_vfadd_vf_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
   return vfadd_vf_f64m8_m(mask, maskedoff, op1, op2, vl);
 }

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfclass.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfclass.c
index 76f6be567769b..bcde19568b84a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfclass.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfclass.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfclass_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfclass.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vuint32mf2_t test_vfclass_v_u32mf2(vfloat32mf2_t op1, size_t vl) {
   return vfclass_v_u32mf2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfclass_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfclass.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -25,7 +23,6 @@ vuint32m1_t test_vfclass_v_u32m1(vfloat32m1_t op1, size_t vl) {
   return vfclass_v_u32m1(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfclass_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfclass.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -35,7 +32,6 @@ vuint32m2_t test_vfclass_v_u32m2(vfloat32m2_t op1, size_t vl) {
   return vfclass_v_u32m2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfclass_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfclass.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -45,7 +41,6 @@ vuint32m4_t test_vfclass_v_u32m4(vfloat32m4_t op1, size_t vl) {
   return vfclass_v_u32m4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfclass_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfclass.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -55,7 +50,6 @@ vuint32m8_t test_vfclass_v_u32m8(vfloat32m8_t op1, size_t vl) {
   return vfclass_v_u32m8(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfclass_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfclass.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -65,7 +59,6 @@ vuint64m1_t test_vfclass_v_u64m1(vfloat64m1_t op1, size_t vl) {
   return vfclass_v_u64m1(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfclass_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfclass.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -75,7 +68,6 @@ vuint64m2_t test_vfclass_v_u64m2(vfloat64m2_t op1, size_t vl) {
   return vfclass_v_u64m2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfclass_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfclass.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -85,7 +77,6 @@ vuint64m4_t test_vfclass_v_u64m4(vfloat64m4_t op1, size_t vl) {
   return vfclass_v_u64m4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfclass_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfclass.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -95,7 +86,6 @@ vuint64m8_t test_vfclass_v_u64m8(vfloat64m8_t op1, size_t vl) {
   return vfclass_v_u64m8(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfclass_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfclass.mask.nxv1f32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -106,7 +96,6 @@ vuint32mf2_t test_vfclass_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
   return vfclass_v_u32mf2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfclass_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfclass.mask.nxv2f32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -117,7 +106,6 @@ vuint32m1_t test_vfclass_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
   return vfclass_v_u32m1_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfclass_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfclass.mask.nxv4f32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -128,7 +116,6 @@ vuint32m2_t test_vfclass_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
   return vfclass_v_u32m2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfclass_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfclass.mask.nxv8f32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -139,7 +126,6 @@ vuint32m4_t test_vfclass_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
   return vfclass_v_u32m4_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfclass_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfclass.mask.nxv16f32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -150,7 +136,6 @@ vuint32m8_t test_vfclass_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
   return vfclass_v_u32m8_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfclass_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfclass.mask.nxv1f64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -161,7 +146,6 @@ vuint64m1_t test_vfclass_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
   return vfclass_v_u64m1_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfclass_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfclass.mask.nxv2f64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -172,7 +156,6 @@ vuint64m2_t test_vfclass_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
   return vfclass_v_u64m2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfclass_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfclass.mask.nxv4f64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -183,7 +166,6 @@ vuint64m4_t test_vfclass_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
   return vfclass_v_u64m4_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfclass_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfclass.mask.nxv8f64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfcvt.c
index 5dea68a7bbe50..55a4a64d0a5d1 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfcvt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfcvt.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.x.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vint32mf2_t test_vfcvt_x_f_v_i32mf2(vfloat32mf2_t src, size_t vl) {
   return vfcvt_x_f_v_i32mf2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -25,7 +23,6 @@ vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2(vfloat32mf2_t src, size_t vl) {
   return vfcvt_rtz_x_f_v_i32mf2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.x.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -35,7 +32,6 @@ vint32m1_t test_vfcvt_x_f_v_i32m1(vfloat32m1_t src, size_t vl) {
   return vfcvt_x_f_v_i32m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -45,7 +41,6 @@ vint32m1_t test_vfcvt_rtz_x_f_v_i32m1(vfloat32m1_t src, size_t vl) {
   return vfcvt_rtz_x_f_v_i32m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.x.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -55,7 +50,6 @@ vint32m2_t test_vfcvt_x_f_v_i32m2(vfloat32m2_t src, size_t vl) {
   return vfcvt_x_f_v_i32m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -65,7 +59,6 @@ vint32m2_t test_vfcvt_rtz_x_f_v_i32m2(vfloat32m2_t src, size_t vl) {
   return vfcvt_rtz_x_f_v_i32m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.x.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -75,7 +68,6 @@ vint32m4_t test_vfcvt_x_f_v_i32m4(vfloat32m4_t src, size_t vl) {
   return vfcvt_x_f_v_i32m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -85,7 +77,6 @@ vint32m4_t test_vfcvt_rtz_x_f_v_i32m4(vfloat32m4_t src, size_t vl) {
   return vfcvt_rtz_x_f_v_i32m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.x.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -95,7 +86,6 @@ vint32m8_t test_vfcvt_x_f_v_i32m8(vfloat32m8_t src, size_t vl) {
   return vfcvt_x_f_v_i32m8(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -105,7 +95,6 @@ vint32m8_t test_vfcvt_rtz_x_f_v_i32m8(vfloat32m8_t src, size_t vl) {
   return vfcvt_rtz_x_f_v_i32m8(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -115,7 +104,6 @@ vuint32mf2_t test_vfcvt_xu_f_v_u32mf2(vfloat32mf2_t src, size_t vl) {
   return vfcvt_xu_f_v_u32mf2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -125,7 +113,6 @@ vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2(vfloat32mf2_t src, size_t vl) {
   return vfcvt_rtz_xu_f_v_u32mf2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -135,7 +122,6 @@ vuint32m1_t test_vfcvt_xu_f_v_u32m1(vfloat32m1_t src, size_t vl) {
   return vfcvt_xu_f_v_u32m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -145,7 +131,6 @@ vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1(vfloat32m1_t src, size_t vl) {
   return vfcvt_rtz_xu_f_v_u32m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -155,7 +140,6 @@ vuint32m2_t test_vfcvt_xu_f_v_u32m2(vfloat32m2_t src, size_t vl) {
   return vfcvt_xu_f_v_u32m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -165,7 +149,6 @@ vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2(vfloat32m2_t src, size_t vl) {
   return vfcvt_rtz_xu_f_v_u32m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -175,7 +158,6 @@ vuint32m4_t test_vfcvt_xu_f_v_u32m4(vfloat32m4_t src, size_t vl) {
   return vfcvt_xu_f_v_u32m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -185,7 +167,6 @@ vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4(vfloat32m4_t src, size_t vl) {
   return vfcvt_rtz_xu_f_v_u32m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -195,7 +176,6 @@ vuint32m8_t test_vfcvt_xu_f_v_u32m8(vfloat32m8_t src, size_t vl) {
   return vfcvt_xu_f_v_u32m8(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -205,7 +185,6 @@ vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8(vfloat32m8_t src, size_t vl) {
   return vfcvt_rtz_xu_f_v_u32m8(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -215,7 +194,6 @@ vfloat32mf2_t test_vfcvt_f_x_v_f32mf2(vint32mf2_t src, size_t vl) {
   return vfcvt_f_x_v_f32mf2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.x.v.nxv2f32.nxv2i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -225,7 +203,6 @@ vfloat32m1_t test_vfcvt_f_x_v_f32m1(vint32m1_t src, size_t vl) {
   return vfcvt_f_x_v_f32m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.x.v.nxv4f32.nxv4i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -235,7 +212,6 @@ vfloat32m2_t test_vfcvt_f_x_v_f32m2(vint32m2_t src, size_t vl) {
   return vfcvt_f_x_v_f32m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.x.v.nxv8f32.nxv8i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -245,7 +221,6 @@ vfloat32m4_t test_vfcvt_f_x_v_f32m4(vint32m4_t src, size_t vl) {
   return vfcvt_f_x_v_f32m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.x.v.nxv16f32.nxv16i32.i64(<vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -255,7 +230,6 @@ vfloat32m8_t test_vfcvt_f_x_v_f32m8(vint32m8_t src, size_t vl) {
   return vfcvt_f_x_v_f32m8(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -265,7 +239,6 @@ vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2(vuint32mf2_t src, size_t vl) {
   return vfcvt_f_xu_v_f32mf2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.xu.v.nxv2f32.nxv2i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -275,7 +248,6 @@ vfloat32m1_t test_vfcvt_f_xu_v_f32m1(vuint32m1_t src, size_t vl) {
   return vfcvt_f_xu_v_f32m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.xu.v.nxv4f32.nxv4i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -285,7 +257,6 @@ vfloat32m2_t test_vfcvt_f_xu_v_f32m2(vuint32m2_t src, size_t vl) {
   return vfcvt_f_xu_v_f32m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.xu.v.nxv8f32.nxv8i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -295,7 +266,6 @@ vfloat32m4_t test_vfcvt_f_xu_v_f32m4(vuint32m4_t src, size_t vl) {
   return vfcvt_f_xu_v_f32m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.xu.v.nxv16f32.nxv16i32.i64(<vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -305,7 +275,6 @@ vfloat32m8_t test_vfcvt_f_xu_v_f32m8(vuint32m8_t src, size_t vl) {
   return vfcvt_f_xu_v_f32m8(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.x.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -315,7 +284,6 @@ vint64m1_t test_vfcvt_x_f_v_i64m1(vfloat64m1_t src, size_t vl) {
   return vfcvt_x_f_v_i64m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -325,7 +293,6 @@ vint64m1_t test_vfcvt_rtz_x_f_v_i64m1(vfloat64m1_t src, size_t vl) {
   return vfcvt_rtz_x_f_v_i64m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.x.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -335,7 +302,6 @@ vint64m2_t test_vfcvt_x_f_v_i64m2(vfloat64m2_t src, size_t vl) {
   return vfcvt_x_f_v_i64m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -345,7 +311,6 @@ vint64m2_t test_vfcvt_rtz_x_f_v_i64m2(vfloat64m2_t src, size_t vl) {
   return vfcvt_rtz_x_f_v_i64m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.x.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -355,7 +320,6 @@ vint64m4_t test_vfcvt_x_f_v_i64m4(vfloat64m4_t src, size_t vl) {
   return vfcvt_x_f_v_i64m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -365,7 +329,6 @@ vint64m4_t test_vfcvt_rtz_x_f_v_i64m4(vfloat64m4_t src, size_t vl) {
   return vfcvt_rtz_x_f_v_i64m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.x.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -375,7 +338,6 @@ vint64m8_t test_vfcvt_x_f_v_i64m8(vfloat64m8_t src, size_t vl) {
   return vfcvt_x_f_v_i64m8(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -385,7 +347,6 @@ vint64m8_t test_vfcvt_rtz_x_f_v_i64m8(vfloat64m8_t src, size_t vl) {
   return vfcvt_rtz_x_f_v_i64m8(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -395,7 +356,6 @@ vuint64m1_t test_vfcvt_xu_f_v_u64m1(vfloat64m1_t src, size_t vl) {
   return vfcvt_xu_f_v_u64m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -405,7 +365,6 @@ vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1(vfloat64m1_t src, size_t vl) {
   return vfcvt_rtz_xu_f_v_u64m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -415,7 +374,6 @@ vuint64m2_t test_vfcvt_xu_f_v_u64m2(vfloat64m2_t src, size_t vl) {
   return vfcvt_xu_f_v_u64m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -425,7 +383,6 @@ vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2(vfloat64m2_t src, size_t vl) {
   return vfcvt_rtz_xu_f_v_u64m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -435,7 +392,6 @@ vuint64m4_t test_vfcvt_xu_f_v_u64m4(vfloat64m4_t src, size_t vl) {
   return vfcvt_xu_f_v_u64m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -445,7 +401,6 @@ vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4(vfloat64m4_t src, size_t vl) {
   return vfcvt_rtz_xu_f_v_u64m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -455,7 +410,6 @@ vuint64m8_t test_vfcvt_xu_f_v_u64m8(vfloat64m8_t src, size_t vl) {
   return vfcvt_xu_f_v_u64m8(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -465,7 +419,6 @@ vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8(vfloat64m8_t src, size_t vl) {
   return vfcvt_rtz_xu_f_v_u64m8(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.x.v.nxv1f64.nxv1i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -475,7 +428,6 @@ vfloat64m1_t test_vfcvt_f_x_v_f64m1(vint64m1_t src, size_t vl) {
   return vfcvt_f_x_v_f64m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.x.v.nxv2f64.nxv2i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -485,7 +437,6 @@ vfloat64m2_t test_vfcvt_f_x_v_f64m2(vint64m2_t src, size_t vl) {
   return vfcvt_f_x_v_f64m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.x.v.nxv4f64.nxv4i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -495,7 +446,6 @@ vfloat64m4_t test_vfcvt_f_x_v_f64m4(vint64m4_t src, size_t vl) {
   return vfcvt_f_x_v_f64m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.x.v.nxv8f64.nxv8i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -505,7 +455,6 @@ vfloat64m8_t test_vfcvt_f_x_v_f64m8(vint64m8_t src, size_t vl) {
   return vfcvt_f_x_v_f64m8(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.xu.v.nxv1f64.nxv1i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -515,7 +464,6 @@ vfloat64m1_t test_vfcvt_f_xu_v_f64m1(vuint64m1_t src, size_t vl) {
   return vfcvt_f_xu_v_f64m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.xu.v.nxv2f64.nxv2i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -525,7 +473,6 @@ vfloat64m2_t test_vfcvt_f_xu_v_f64m2(vuint64m2_t src, size_t vl) {
   return vfcvt_f_xu_v_f64m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.xu.v.nxv4f64.nxv4i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -535,7 +482,6 @@ vfloat64m4_t test_vfcvt_f_xu_v_f64m4(vuint64m4_t src, size_t vl) {
   return vfcvt_f_xu_v_f64m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.xu.v.nxv8f64.nxv8i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -545,7 +491,6 @@ vfloat64m8_t test_vfcvt_f_xu_v_f64m8(vuint64m8_t src, size_t vl) {
   return vfcvt_f_xu_v_f64m8(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -556,7 +501,6 @@ vint32mf2_t test_vfcvt_x_f_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
   return vfcvt_x_f_v_i32mf2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -567,7 +511,6 @@ vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
   return vfcvt_rtz_x_f_v_i32mf2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv2i32.nxv2f32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -578,7 +521,6 @@ vint32m1_t test_vfcvt_x_f_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
   return vfcvt_x_f_v_i32m1_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -589,7 +531,6 @@ vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
   return vfcvt_rtz_x_f_v_i32m1_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv4i32.nxv4f32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -600,7 +541,6 @@ vint32m2_t test_vfcvt_x_f_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
   return vfcvt_x_f_v_i32m2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -611,7 +551,6 @@ vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
   return vfcvt_rtz_x_f_v_i32m2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv8i32.nxv8f32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -622,7 +561,6 @@ vint32m4_t test_vfcvt_x_f_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
   return vfcvt_x_f_v_i32m4_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -633,7 +571,6 @@ vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
   return vfcvt_rtz_x_f_v_i32m4_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv16i32.nxv16f32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -644,7 +581,6 @@ vint32m8_t test_vfcvt_x_f_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
   return vfcvt_x_f_v_i32m8_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -655,7 +591,6 @@ vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
   return vfcvt_rtz_x_f_v_i32m8_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -666,7 +601,6 @@ vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
   return vfcvt_xu_f_v_u32mf2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -678,7 +612,6 @@ vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_m(vbool64_t mask,
   return vfcvt_rtz_xu_f_v_u32mf2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i32.nxv2f32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -689,7 +622,6 @@ vuint32m1_t test_vfcvt_xu_f_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
   return vfcvt_xu_f_v_u32m1_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -700,7 +632,6 @@ vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
   return vfcvt_rtz_xu_f_v_u32m1_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i32.nxv4f32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -711,7 +642,6 @@ vuint32m2_t test_vfcvt_xu_f_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
   return vfcvt_xu_f_v_u32m2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -722,7 +652,6 @@ vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
   return vfcvt_rtz_xu_f_v_u32m2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i32.nxv8f32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -733,7 +662,6 @@ vuint32m4_t test_vfcvt_xu_f_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
   return vfcvt_xu_f_v_u32m4_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -744,7 +672,6 @@ vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
   return vfcvt_rtz_xu_f_v_u32m4_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i32.nxv16f32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -755,7 +682,6 @@ vuint32m8_t test_vfcvt_xu_f_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
   return vfcvt_xu_f_v_u32m8_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -766,7 +692,6 @@ vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
   return vfcvt_rtz_xu_f_v_u32m8_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv1f32.nxv1i32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -777,7 +702,6 @@ vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
   return vfcvt_f_x_v_f32mf2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv2f32.nxv2i32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -788,7 +712,6 @@ vfloat32m1_t test_vfcvt_f_x_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
   return vfcvt_f_x_v_f32m1_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv4f32.nxv4i32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -799,7 +722,6 @@ vfloat32m2_t test_vfcvt_f_x_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
   return vfcvt_f_x_v_f32m2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv8f32.nxv8i32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -810,7 +732,6 @@ vfloat32m4_t test_vfcvt_f_x_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
   return vfcvt_f_x_v_f32m4_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv16f32.nxv16i32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -821,7 +742,6 @@ vfloat32m8_t test_vfcvt_f_x_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
   return vfcvt_f_x_v_f32m8_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f32.nxv1i32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -833,7 +753,6 @@ vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_m(vbool64_t mask,
   return vfcvt_f_xu_v_f32mf2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f32.nxv2i32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -844,7 +763,6 @@ vfloat32m1_t test_vfcvt_f_xu_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
   return vfcvt_f_xu_v_f32m1_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f32.nxv4i32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -855,7 +773,6 @@ vfloat32m2_t test_vfcvt_f_xu_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
   return vfcvt_f_xu_v_f32m2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f32.nxv8i32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -866,7 +783,6 @@ vfloat32m4_t test_vfcvt_f_xu_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
   return vfcvt_f_xu_v_f32m4_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f32.nxv16i32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -877,7 +793,6 @@ vfloat32m8_t test_vfcvt_f_xu_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
   return vfcvt_f_xu_v_f32m8_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.x.f.v.mask.nxv1i64.nxv1f64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -888,7 +803,6 @@ vint64m1_t test_vfcvt_x_f_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
   return vfcvt_x_f_v_i64m1_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -899,7 +813,6 @@ vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
   return vfcvt_rtz_x_f_v_i64m1_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.x.f.v.mask.nxv2i64.nxv2f64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -910,7 +823,6 @@ vint64m2_t test_vfcvt_x_f_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
   return vfcvt_x_f_v_i64m2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -921,7 +833,6 @@ vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
   return vfcvt_rtz_x_f_v_i64m2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.x.f.v.mask.nxv4i64.nxv4f64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -932,7 +843,6 @@ vint64m4_t test_vfcvt_x_f_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
   return vfcvt_x_f_v_i64m4_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -943,7 +853,6 @@ vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
   return vfcvt_rtz_x_f_v_i64m4_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.x.f.v.mask.nxv8i64.nxv8f64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -954,7 +863,6 @@ vint64m8_t test_vfcvt_x_f_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
   return vfcvt_x_f_v_i64m8_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -965,7 +873,6 @@ vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
   return vfcvt_rtz_x_f_v_i64m8_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i64.nxv1f64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -976,7 +883,6 @@ vuint64m1_t test_vfcvt_xu_f_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
   return vfcvt_xu_f_v_u64m1_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -987,7 +893,6 @@ vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
   return vfcvt_rtz_xu_f_v_u64m1_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i64.nxv2f64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -998,7 +903,6 @@ vuint64m2_t test_vfcvt_xu_f_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
   return vfcvt_xu_f_v_u64m2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1009,7 +913,6 @@ vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
   return vfcvt_rtz_xu_f_v_u64m2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i64.nxv4f64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1020,7 +923,6 @@ vuint64m4_t test_vfcvt_xu_f_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
   return vfcvt_xu_f_v_u64m4_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1031,7 +933,6 @@ vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
   return vfcvt_rtz_xu_f_v_u64m4_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i64.nxv8f64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1042,7 +943,6 @@ vuint64m8_t test_vfcvt_xu_f_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
   return vfcvt_xu_f_v_u64m8_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1053,7 +953,6 @@ vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
   return vfcvt_rtz_xu_f_v_u64m8_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.x.v.mask.nxv1f64.nxv1i64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1064,7 +963,6 @@ vfloat64m1_t test_vfcvt_f_x_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
   return vfcvt_f_x_v_f64m1_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.x.v.mask.nxv2f64.nxv2i64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1075,7 +973,6 @@ vfloat64m2_t test_vfcvt_f_x_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
   return vfcvt_f_x_v_f64m2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.x.v.mask.nxv4f64.nxv4i64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1086,7 +983,6 @@ vfloat64m4_t test_vfcvt_f_x_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
   return vfcvt_f_x_v_f64m4_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.x.v.mask.nxv8f64.nxv8i64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1097,7 +993,6 @@ vfloat64m8_t test_vfcvt_f_x_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
   return vfcvt_f_x_v_f64m8_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f64.nxv1i64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1108,7 +1003,6 @@ vfloat64m1_t test_vfcvt_f_xu_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
   return vfcvt_f_xu_v_f64m1_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f64.nxv2i64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1119,7 +1013,6 @@ vfloat64m2_t test_vfcvt_f_xu_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
   return vfcvt_f_xu_v_f64m2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f64.nxv4i64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1130,7 +1023,6 @@ vfloat64m4_t test_vfcvt_f_xu_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
   return vfcvt_f_xu_v_f64m4_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f64.nxv8i64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfdiv.c
index d7a1c5b3d6b0f..ccad330646449 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfdiv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfdiv.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vfloat32mf2_t test_vfdiv_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
   return vfdiv_vv_f32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -26,7 +24,6 @@ vfloat32mf2_t test_vfdiv_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
   return vfdiv_vf_f32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -37,7 +34,6 @@ vfloat32m1_t test_vfdiv_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
   return vfdiv_vv_f32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -47,7 +43,6 @@ vfloat32m1_t test_vfdiv_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
   return vfdiv_vf_f32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -58,7 +53,6 @@ vfloat32m2_t test_vfdiv_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
   return vfdiv_vv_f32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -68,7 +62,6 @@ vfloat32m2_t test_vfdiv_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
   return vfdiv_vf_f32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -79,7 +72,6 @@ vfloat32m4_t test_vfdiv_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
   return vfdiv_vv_f32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -89,7 +81,6 @@ vfloat32m4_t test_vfdiv_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
   return vfdiv_vf_f32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -100,7 +91,6 @@ vfloat32m8_t test_vfdiv_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
   return vfdiv_vv_f32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -110,7 +100,6 @@ vfloat32m8_t test_vfdiv_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
   return vfdiv_vf_f32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfdiv.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -121,7 +110,6 @@ vfloat64m1_t test_vfdiv_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
   return vfdiv_vv_f64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfdiv.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -131,7 +119,6 @@ vfloat64m1_t test_vfdiv_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
   return vfdiv_vf_f64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfdiv.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -142,7 +129,6 @@ vfloat64m2_t test_vfdiv_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
   return vfdiv_vv_f64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfdiv.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -152,7 +138,6 @@ vfloat64m2_t test_vfdiv_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
   return vfdiv_vf_f64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfdiv.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -163,7 +148,6 @@ vfloat64m4_t test_vfdiv_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
   return vfdiv_vv_f64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfdiv.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -173,7 +157,6 @@ vfloat64m4_t test_vfdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
   return vfdiv_vf_f64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -184,7 +167,6 @@ vfloat64m8_t test_vfdiv_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
   return vfdiv_vv_f64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -194,7 +176,6 @@ vfloat64m8_t test_vfdiv_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
   return vfdiv_vf_f64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -206,7 +187,6 @@ vfloat32mf2_t test_vfdiv_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
   return vfdiv_vv_f32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -217,7 +197,6 @@ vfloat32mf2_t test_vfdiv_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
   return vfdiv_vf_f32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfdiv.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -229,7 +208,6 @@ vfloat32m1_t test_vfdiv_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
   return vfdiv_vv_f32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfdiv.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -240,7 +218,6 @@ vfloat32m1_t test_vfdiv_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
   return vfdiv_vf_f32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfdiv.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -252,7 +229,6 @@ vfloat32m2_t test_vfdiv_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
   return vfdiv_vv_f32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfdiv.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -263,7 +239,6 @@ vfloat32m2_t test_vfdiv_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
   return vfdiv_vf_f32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfdiv.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -275,7 +250,6 @@ vfloat32m4_t test_vfdiv_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
   return vfdiv_vv_f32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfdiv.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -286,7 +260,6 @@ vfloat32m4_t test_vfdiv_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
   return vfdiv_vf_f32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -298,7 +271,6 @@ vfloat32m8_t test_vfdiv_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
   return vfdiv_vv_f32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -309,7 +281,6 @@ vfloat32m8_t test_vfdiv_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
   return vfdiv_vf_f32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfdiv.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -321,7 +292,6 @@ vfloat64m1_t test_vfdiv_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
   return vfdiv_vv_f64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfdiv.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -332,7 +302,6 @@ vfloat64m1_t test_vfdiv_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
   return vfdiv_vf_f64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfdiv.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -344,7 +313,6 @@ vfloat64m2_t test_vfdiv_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
   return vfdiv_vv_f64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfdiv.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -355,7 +323,6 @@ vfloat64m2_t test_vfdiv_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
   return vfdiv_vf_f64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfdiv.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -367,7 +334,6 @@ vfloat64m4_t test_vfdiv_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
   return vfdiv_vv_f64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfdiv.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -378,7 +344,6 @@ vfloat64m4_t test_vfdiv_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
   return vfdiv_vf_f64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -390,7 +355,6 @@ vfloat64m8_t test_vfdiv_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
   return vfdiv_vv_f64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfirst.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfirst.c
index b25262efda676..74a2b7ce000b2 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfirst.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfirst.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfirst_m_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -12,7 +11,6 @@
 //
 long test_vfirst_m_b1(vbool1_t op1, size_t vl) { return vfirst_m_b1(op1, vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vfirst_m_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -20,7 +18,6 @@ long test_vfirst_m_b1(vbool1_t op1, size_t vl) { return vfirst_m_b1(op1, vl); }
 //
 long test_vfirst_m_b2(vbool2_t op1, size_t vl) { return vfirst_m_b2(op1, vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vfirst_m_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -28,7 +25,6 @@ long test_vfirst_m_b2(vbool2_t op1, size_t vl) { return vfirst_m_b2(op1, vl); }
 //
 long test_vfirst_m_b4(vbool4_t op1, size_t vl) { return vfirst_m_b4(op1, vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vfirst_m_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -36,7 +32,6 @@ long test_vfirst_m_b4(vbool4_t op1, size_t vl) { return vfirst_m_b4(op1, vl); }
 //
 long test_vfirst_m_b8(vbool8_t op1, size_t vl) { return vfirst_m_b8(op1, vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vfirst_m_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -46,7 +41,6 @@ long test_vfirst_m_b16(vbool16_t op1, size_t vl) {
   return vfirst_m_b16(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfirst_m_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -56,7 +50,6 @@ long test_vfirst_m_b32(vbool32_t op1, size_t vl) {
   return vfirst_m_b32(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfirst_m_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -66,7 +59,6 @@ long test_vfirst_m_b64(vbool64_t op1, size_t vl) {
   return vfirst_m_b64(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfirst_m_b1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -76,7 +68,6 @@ long test_vfirst_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) {
   return vfirst_m_b1_m(mask, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfirst_m_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -86,7 +77,6 @@ long test_vfirst_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) {
   return vfirst_m_b2_m(mask, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfirst_m_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -96,7 +86,6 @@ long test_vfirst_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) {
   return vfirst_m_b4_m(mask, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfirst_m_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -106,7 +95,6 @@ long test_vfirst_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) {
   return vfirst_m_b8_m(mask, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfirst_m_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -116,7 +104,6 @@ long test_vfirst_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) {
   return vfirst_m_b16_m(mask, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfirst_m_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -126,7 +113,6 @@ long test_vfirst_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) {
   return vfirst_m_b32_m(mask, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfirst_m_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmacc.c
index 96d7109f53cdc..072a32e28c35a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmacc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmacc.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vfloat32mf2_t test_vfmacc_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
   return vfmacc_vv_f32mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -27,7 +25,6 @@ vfloat32mf2_t test_vfmacc_vf_f32mf2(vfloat32mf2_t acc, float op1,
   return vfmacc_vf_f32mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmacc.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -38,7 +35,6 @@ vfloat32m1_t test_vfmacc_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
   return vfmacc_vv_f32m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmacc.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -49,7 +45,6 @@ vfloat32m1_t test_vfmacc_vf_f32m1(vfloat32m1_t acc, float op1, vfloat32m1_t op2,
   return vfmacc_vf_f32m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmacc.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -60,7 +55,6 @@ vfloat32m2_t test_vfmacc_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
   return vfmacc_vv_f32m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmacc.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -71,7 +65,6 @@ vfloat32m2_t test_vfmacc_vf_f32m2(vfloat32m2_t acc, float op1, vfloat32m2_t op2,
   return vfmacc_vf_f32m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmacc.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -82,7 +75,6 @@ vfloat32m4_t test_vfmacc_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
   return vfmacc_vv_f32m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmacc.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -93,7 +85,6 @@ vfloat32m4_t test_vfmacc_vf_f32m4(vfloat32m4_t acc, float op1, vfloat32m4_t op2,
   return vfmacc_vf_f32m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmacc.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +95,6 @@ vfloat32m8_t test_vfmacc_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
   return vfmacc_vv_f32m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmacc.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -115,7 +105,6 @@ vfloat32m8_t test_vfmacc_vf_f32m8(vfloat32m8_t acc, float op1, vfloat32m8_t op2,
   return vfmacc_vf_f32m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmacc.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -126,7 +115,6 @@ vfloat64m1_t test_vfmacc_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
   return vfmacc_vv_f64m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmacc.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -137,7 +125,6 @@ vfloat64m1_t test_vfmacc_vf_f64m1(vfloat64m1_t acc, double op1,
   return vfmacc_vf_f64m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmacc.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -148,7 +135,6 @@ vfloat64m2_t test_vfmacc_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
   return vfmacc_vv_f64m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmacc.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -159,7 +145,6 @@ vfloat64m2_t test_vfmacc_vf_f64m2(vfloat64m2_t acc, double op1,
   return vfmacc_vf_f64m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmacc.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -170,7 +155,6 @@ vfloat64m4_t test_vfmacc_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
   return vfmacc_vv_f64m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmacc.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -181,7 +165,6 @@ vfloat64m4_t test_vfmacc_vf_f64m4(vfloat64m4_t acc, double op1,
   return vfmacc_vf_f64m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -192,7 +175,6 @@ vfloat64m8_t test_vfmacc_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
   return vfmacc_vv_f64m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -203,7 +185,6 @@ vfloat64m8_t test_vfmacc_vf_f64m8(vfloat64m8_t acc, double op1,
   return vfmacc_vf_f64m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -215,7 +196,6 @@ vfloat32mf2_t test_vfmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
   return vfmacc_vv_f32mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmacc.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -226,7 +206,6 @@ vfloat32mf2_t test_vfmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
   return vfmacc_vf_f32mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmacc.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -238,7 +217,6 @@ vfloat32m1_t test_vfmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
   return vfmacc_vv_f32m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmacc.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -249,7 +227,6 @@ vfloat32m1_t test_vfmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, float op1,
   return vfmacc_vf_f32m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmacc.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -261,7 +238,6 @@ vfloat32m2_t test_vfmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
   return vfmacc_vv_f32m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmacc.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -272,7 +248,6 @@ vfloat32m2_t test_vfmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1,
   return vfmacc_vf_f32m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmacc.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -284,7 +259,6 @@ vfloat32m4_t test_vfmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
   return vfmacc_vv_f32m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmacc.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -295,7 +269,6 @@ vfloat32m4_t test_vfmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
   return vfmacc_vf_f32m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmacc.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -307,7 +280,6 @@ vfloat32m8_t test_vfmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
   return vfmacc_vv_f32m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmacc.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -318,7 +290,6 @@ vfloat32m8_t test_vfmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
   return vfmacc_vf_f32m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmacc.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -330,7 +301,6 @@ vfloat64m1_t test_vfmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
   return vfmacc_vv_f64m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmacc.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -341,7 +311,6 @@ vfloat64m1_t test_vfmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
   return vfmacc_vf_f64m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmacc.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -353,7 +322,6 @@ vfloat64m2_t test_vfmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
   return vfmacc_vv_f64m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmacc.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -364,7 +332,6 @@ vfloat64m2_t test_vfmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
   return vfmacc_vf_f64m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmacc.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -376,7 +343,6 @@ vfloat64m4_t test_vfmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
   return vfmacc_vv_f64m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmacc.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -387,7 +353,6 @@ vfloat64m4_t test_vfmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
   return vfmacc_vf_f64m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -399,7 +364,6 @@ vfloat64m8_t test_vfmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
   return vfmacc_vv_f64m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmadd.c
index 04fd7fc62673c..335343ee891cc 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmadd.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vfloat32mf2_t test_vfmadd_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
   return vfmadd_vv_f32mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -27,7 +25,6 @@ vfloat32mf2_t test_vfmadd_vf_f32mf2(vfloat32mf2_t acc, float op1,
   return vfmadd_vf_f32mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmadd.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -38,7 +35,6 @@ vfloat32m1_t test_vfmadd_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
   return vfmadd_vv_f32m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmadd.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -49,7 +45,6 @@ vfloat32m1_t test_vfmadd_vf_f32m1(vfloat32m1_t acc, float op1, vfloat32m1_t op2,
   return vfmadd_vf_f32m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmadd.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -60,7 +55,6 @@ vfloat32m2_t test_vfmadd_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
   return vfmadd_vv_f32m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmadd.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -71,7 +65,6 @@ vfloat32m2_t test_vfmadd_vf_f32m2(vfloat32m2_t acc, float op1, vfloat32m2_t op2,
   return vfmadd_vf_f32m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmadd.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -82,7 +75,6 @@ vfloat32m4_t test_vfmadd_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
   return vfmadd_vv_f32m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmadd.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -93,7 +85,6 @@ vfloat32m4_t test_vfmadd_vf_f32m4(vfloat32m4_t acc, float op1, vfloat32m4_t op2,
   return vfmadd_vf_f32m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmadd.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +95,6 @@ vfloat32m8_t test_vfmadd_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
   return vfmadd_vv_f32m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmadd.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -115,7 +105,6 @@ vfloat32m8_t test_vfmadd_vf_f32m8(vfloat32m8_t acc, float op1, vfloat32m8_t op2,
   return vfmadd_vf_f32m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmadd.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -126,7 +115,6 @@ vfloat64m1_t test_vfmadd_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
   return vfmadd_vv_f64m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmadd.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -137,7 +125,6 @@ vfloat64m1_t test_vfmadd_vf_f64m1(vfloat64m1_t acc, double op1,
   return vfmadd_vf_f64m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmadd.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -148,7 +135,6 @@ vfloat64m2_t test_vfmadd_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
   return vfmadd_vv_f64m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmadd.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -159,7 +145,6 @@ vfloat64m2_t test_vfmadd_vf_f64m2(vfloat64m2_t acc, double op1,
   return vfmadd_vf_f64m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmadd.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -170,7 +155,6 @@ vfloat64m4_t test_vfmadd_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
   return vfmadd_vv_f64m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmadd.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -181,7 +165,6 @@ vfloat64m4_t test_vfmadd_vf_f64m4(vfloat64m4_t acc, double op1,
   return vfmadd_vf_f64m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmadd.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -192,7 +175,6 @@ vfloat64m8_t test_vfmadd_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
   return vfmadd_vv_f64m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmadd.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -203,7 +185,6 @@ vfloat64m8_t test_vfmadd_vf_f64m8(vfloat64m8_t acc, double op1,
   return vfmadd_vf_f64m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -215,7 +196,6 @@ vfloat32mf2_t test_vfmadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
   return vfmadd_vv_f32mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmadd.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -226,7 +206,6 @@ vfloat32mf2_t test_vfmadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
   return vfmadd_vf_f32mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmadd.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -238,7 +217,6 @@ vfloat32m1_t test_vfmadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
   return vfmadd_vv_f32m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmadd.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -249,7 +227,6 @@ vfloat32m1_t test_vfmadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, float op1,
   return vfmadd_vf_f32m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmadd.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -261,7 +238,6 @@ vfloat32m2_t test_vfmadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
   return vfmadd_vv_f32m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmadd.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -272,7 +248,6 @@ vfloat32m2_t test_vfmadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1,
   return vfmadd_vf_f32m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmadd.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -284,7 +259,6 @@ vfloat32m4_t test_vfmadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
   return vfmadd_vv_f32m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmadd.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -295,7 +269,6 @@ vfloat32m4_t test_vfmadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
   return vfmadd_vf_f32m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -307,7 +280,6 @@ vfloat32m8_t test_vfmadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
   return vfmadd_vv_f32m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmadd.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -318,7 +290,6 @@ vfloat32m8_t test_vfmadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
   return vfmadd_vf_f32m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmadd.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -330,7 +301,6 @@ vfloat64m1_t test_vfmadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
   return vfmadd_vv_f64m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmadd.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -341,7 +311,6 @@ vfloat64m1_t test_vfmadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
   return vfmadd_vf_f64m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmadd.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -353,7 +322,6 @@ vfloat64m2_t test_vfmadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
   return vfmadd_vv_f64m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmadd.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -364,7 +332,6 @@ vfloat64m2_t test_vfmadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
   return vfmadd_vf_f64m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmadd.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -376,7 +343,6 @@ vfloat64m4_t test_vfmadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
   return vfmadd_vv_f64m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmadd.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -387,7 +353,6 @@ vfloat64m4_t test_vfmadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
   return vfmadd_vf_f64m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -399,7 +364,6 @@ vfloat64m8_t test_vfmadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
   return vfmadd_vv_f64m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmadd.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmax.c
index 3ca3b7821e472..ba17d0a995ea9 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmax.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmax.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vfloat32mf2_t test_vfmax_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
   return vfmax_vv_f32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmax_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -26,7 +24,6 @@ vfloat32mf2_t test_vfmax_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
   return vfmax_vf_f32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmax_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -37,7 +34,6 @@ vfloat32m1_t test_vfmax_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
   return vfmax_vv_f32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmax_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -47,7 +43,6 @@ vfloat32m1_t test_vfmax_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
   return vfmax_vf_f32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmax_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -58,7 +53,6 @@ vfloat32m2_t test_vfmax_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
   return vfmax_vv_f32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmax_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -68,7 +62,6 @@ vfloat32m2_t test_vfmax_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
   return vfmax_vf_f32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmax_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -79,7 +72,6 @@ vfloat32m4_t test_vfmax_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
   return vfmax_vv_f32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmax_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -89,7 +81,6 @@ vfloat32m4_t test_vfmax_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
   return vfmax_vf_f32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmax_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -100,7 +91,6 @@ vfloat32m8_t test_vfmax_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
   return vfmax_vv_f32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmax_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -110,7 +100,6 @@ vfloat32m8_t test_vfmax_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
   return vfmax_vf_f32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmax_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -121,7 +110,6 @@ vfloat64m1_t test_vfmax_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
   return vfmax_vv_f64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmax_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -131,7 +119,6 @@ vfloat64m1_t test_vfmax_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
   return vfmax_vf_f64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmax_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -142,7 +129,6 @@ vfloat64m2_t test_vfmax_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
   return vfmax_vv_f64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmax_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -152,7 +138,6 @@ vfloat64m2_t test_vfmax_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
   return vfmax_vf_f64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmax_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -163,7 +148,6 @@ vfloat64m4_t test_vfmax_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
   return vfmax_vv_f64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmax_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -173,7 +157,6 @@ vfloat64m4_t test_vfmax_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
   return vfmax_vf_f64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmax_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -184,7 +167,6 @@ vfloat64m8_t test_vfmax_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
   return vfmax_vv_f64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmax_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -194,7 +176,6 @@ vfloat64m8_t test_vfmax_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
   return vfmax_vf_f64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -206,7 +187,6 @@ vfloat32mf2_t test_vfmax_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
   return vfmax_vv_f32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmax_vf_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -217,7 +197,6 @@ vfloat32mf2_t test_vfmax_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
   return vfmax_vf_f32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmax_vv_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmax.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -229,7 +208,6 @@ vfloat32m1_t test_vfmax_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
   return vfmax_vv_f32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmax_vf_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmax.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -240,7 +218,6 @@ vfloat32m1_t test_vfmax_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
   return vfmax_vf_f32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmax_vv_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmax.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -252,7 +229,6 @@ vfloat32m2_t test_vfmax_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
   return vfmax_vv_f32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmax_vf_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmax.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -263,7 +239,6 @@ vfloat32m2_t test_vfmax_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
   return vfmax_vf_f32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmax_vv_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmax.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -275,7 +250,6 @@ vfloat32m4_t test_vfmax_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
   return vfmax_vv_f32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmax_vf_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmax.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -286,7 +260,6 @@ vfloat32m4_t test_vfmax_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
   return vfmax_vf_f32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmax_vv_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -298,7 +271,6 @@ vfloat32m8_t test_vfmax_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
   return vfmax_vv_f32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmax_vf_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmax.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -309,7 +281,6 @@ vfloat32m8_t test_vfmax_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
   return vfmax_vf_f32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmax_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmax.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -321,7 +292,6 @@ vfloat64m1_t test_vfmax_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
   return vfmax_vv_f64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmax_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmax.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -332,7 +302,6 @@ vfloat64m1_t test_vfmax_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
   return vfmax_vf_f64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmax_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmax.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -344,7 +313,6 @@ vfloat64m2_t test_vfmax_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
   return vfmax_vv_f64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmax_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmax.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -355,7 +323,6 @@ vfloat64m2_t test_vfmax_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
   return vfmax_vf_f64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmax_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmax.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -367,7 +334,6 @@ vfloat64m4_t test_vfmax_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
   return vfmax_vv_f64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmax_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmax.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -378,7 +344,6 @@ vfloat64m4_t test_vfmax_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
   return vfmax_vf_f64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmax_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmax.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -390,7 +355,6 @@ vfloat64m8_t test_vfmax_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
   return vfmax_vv_f64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmax_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmax.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmerge.c
index 7f808e729c322..abdb7173bc43a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmerge.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmerge.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmerge.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vfloat32mf2_t test_vfmerge_vfm_f32mf2(vbool64_t mask, vfloat32mf2_t op1,
   return vfmerge_vfm_f32mf2(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmerge.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -27,7 +25,6 @@ vfloat32m1_t test_vfmerge_vfm_f32m1(vbool32_t mask, vfloat32m1_t op1, float op2,
   return vfmerge_vfm_f32m1(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmerge.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -38,7 +35,6 @@ vfloat32m2_t test_vfmerge_vfm_f32m2(vbool16_t mask, vfloat32m2_t op1, float op2,
   return vfmerge_vfm_f32m2(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmerge.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -49,7 +45,6 @@ vfloat32m4_t test_vfmerge_vfm_f32m4(vbool8_t mask, vfloat32m4_t op1, float op2,
   return vfmerge_vfm_f32m4(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmerge.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -60,7 +55,6 @@ vfloat32m8_t test_vfmerge_vfm_f32m8(vbool4_t mask, vfloat32m8_t op1, float op2,
   return vfmerge_vfm_f32m8(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmerge.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -71,7 +65,6 @@ vfloat64m1_t test_vfmerge_vfm_f64m1(vbool64_t mask, vfloat64m1_t op1,
   return vfmerge_vfm_f64m1(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmerge.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -82,7 +75,6 @@ vfloat64m2_t test_vfmerge_vfm_f64m2(vbool32_t mask, vfloat64m2_t op1,
   return vfmerge_vfm_f64m2(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmerge.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -93,7 +85,6 @@ vfloat64m4_t test_vfmerge_vfm_f64m4(vbool16_t mask, vfloat64m4_t op1,
   return vfmerge_vfm_f64m4(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmerge.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmin.c
index e3cd9d76b7826..3067ceee1001f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmin.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmin.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vfloat32mf2_t test_vfmin_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
   return vfmin_vv_f32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -26,7 +24,6 @@ vfloat32mf2_t test_vfmin_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
   return vfmin_vf_f32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmin_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -37,7 +34,6 @@ vfloat32m1_t test_vfmin_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
   return vfmin_vv_f32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmin_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -47,7 +43,6 @@ vfloat32m1_t test_vfmin_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
   return vfmin_vf_f32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmin_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -58,7 +53,6 @@ vfloat32m2_t test_vfmin_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
   return vfmin_vv_f32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmin_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -68,7 +62,6 @@ vfloat32m2_t test_vfmin_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
   return vfmin_vf_f32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmin_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -79,7 +72,6 @@ vfloat32m4_t test_vfmin_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
   return vfmin_vv_f32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmin_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -89,7 +81,6 @@ vfloat32m4_t test_vfmin_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
   return vfmin_vf_f32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmin_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -100,7 +91,6 @@ vfloat32m8_t test_vfmin_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
   return vfmin_vv_f32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmin_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -110,7 +100,6 @@ vfloat32m8_t test_vfmin_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
   return vfmin_vf_f32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmin_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -121,7 +110,6 @@ vfloat64m1_t test_vfmin_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
   return vfmin_vv_f64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmin_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -131,7 +119,6 @@ vfloat64m1_t test_vfmin_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
   return vfmin_vf_f64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmin_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -142,7 +129,6 @@ vfloat64m2_t test_vfmin_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
   return vfmin_vv_f64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmin_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -152,7 +138,6 @@ vfloat64m2_t test_vfmin_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
   return vfmin_vf_f64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmin_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -163,7 +148,6 @@ vfloat64m4_t test_vfmin_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
   return vfmin_vv_f64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmin_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -173,7 +157,6 @@ vfloat64m4_t test_vfmin_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
   return vfmin_vf_f64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmin_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -184,7 +167,6 @@ vfloat64m8_t test_vfmin_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
   return vfmin_vv_f64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmin_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -194,7 +176,6 @@ vfloat64m8_t test_vfmin_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
   return vfmin_vf_f64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -206,7 +187,6 @@ vfloat32mf2_t test_vfmin_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
   return vfmin_vv_f32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -217,7 +197,6 @@ vfloat32mf2_t test_vfmin_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
   return vfmin_vf_f32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmin_vv_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmin.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -229,7 +208,6 @@ vfloat32m1_t test_vfmin_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
   return vfmin_vv_f32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmin_vf_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmin.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -240,7 +218,6 @@ vfloat32m1_t test_vfmin_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
   return vfmin_vf_f32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmin_vv_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -252,7 +229,6 @@ vfloat32m2_t test_vfmin_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
   return vfmin_vv_f32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmin_vf_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -263,7 +239,6 @@ vfloat32m2_t test_vfmin_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
   return vfmin_vf_f32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmin_vv_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -275,7 +250,6 @@ vfloat32m4_t test_vfmin_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
   return vfmin_vv_f32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmin_vf_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -286,7 +260,6 @@ vfloat32m4_t test_vfmin_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
   return vfmin_vf_f32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmin_vv_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -298,7 +271,6 @@ vfloat32m8_t test_vfmin_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
   return vfmin_vv_f32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmin_vf_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -309,7 +281,6 @@ vfloat32m8_t test_vfmin_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
   return vfmin_vf_f32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmin_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -321,7 +292,6 @@ vfloat64m1_t test_vfmin_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
   return vfmin_vv_f64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmin_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -332,7 +302,6 @@ vfloat64m1_t test_vfmin_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
   return vfmin_vf_f64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmin_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -344,7 +313,6 @@ vfloat64m2_t test_vfmin_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
   return vfmin_vv_f64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmin_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -355,7 +323,6 @@ vfloat64m2_t test_vfmin_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
   return vfmin_vf_f64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmin_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -367,7 +334,6 @@ vfloat64m4_t test_vfmin_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
   return vfmin_vv_f64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmin_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -378,7 +344,6 @@ vfloat64m4_t test_vfmin_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
   return vfmin_vf_f64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmin_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -390,7 +355,6 @@ vfloat64m8_t test_vfmin_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
   return vfmin_vv_f64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmin_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsac.c
index 3a6fd075c3030..b35bbc857d2db 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsac.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsac.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vfloat32mf2_t test_vfmsac_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
   return vfmsac_vv_f32mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -27,7 +25,6 @@ vfloat32mf2_t test_vfmsac_vf_f32mf2(vfloat32mf2_t acc, float op1,
   return vfmsac_vf_f32mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsac.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -38,7 +35,6 @@ vfloat32m1_t test_vfmsac_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
   return vfmsac_vv_f32m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsac.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -49,7 +45,6 @@ vfloat32m1_t test_vfmsac_vf_f32m1(vfloat32m1_t acc, float op1, vfloat32m1_t op2,
   return vfmsac_vf_f32m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsac.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -60,7 +55,6 @@ vfloat32m2_t test_vfmsac_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
   return vfmsac_vv_f32m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsac.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -71,7 +65,6 @@ vfloat32m2_t test_vfmsac_vf_f32m2(vfloat32m2_t acc, float op1, vfloat32m2_t op2,
   return vfmsac_vf_f32m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsac.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -82,7 +75,6 @@ vfloat32m4_t test_vfmsac_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
   return vfmsac_vv_f32m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsac.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -93,7 +85,6 @@ vfloat32m4_t test_vfmsac_vf_f32m4(vfloat32m4_t acc, float op1, vfloat32m4_t op2,
   return vfmsac_vf_f32m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsac.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +95,6 @@ vfloat32m8_t test_vfmsac_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
   return vfmsac_vv_f32m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsac.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -115,7 +105,6 @@ vfloat32m8_t test_vfmsac_vf_f32m8(vfloat32m8_t acc, float op1, vfloat32m8_t op2,
   return vfmsac_vf_f32m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsac.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -126,7 +115,6 @@ vfloat64m1_t test_vfmsac_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
   return vfmsac_vv_f64m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsac.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -137,7 +125,6 @@ vfloat64m1_t test_vfmsac_vf_f64m1(vfloat64m1_t acc, double op1,
   return vfmsac_vf_f64m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsac.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -148,7 +135,6 @@ vfloat64m2_t test_vfmsac_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
   return vfmsac_vv_f64m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsac.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -159,7 +145,6 @@ vfloat64m2_t test_vfmsac_vf_f64m2(vfloat64m2_t acc, double op1,
   return vfmsac_vf_f64m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsac.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -170,7 +155,6 @@ vfloat64m4_t test_vfmsac_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
   return vfmsac_vv_f64m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsac.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -181,7 +165,6 @@ vfloat64m4_t test_vfmsac_vf_f64m4(vfloat64m4_t acc, double op1,
   return vfmsac_vf_f64m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsac.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -192,7 +175,6 @@ vfloat64m8_t test_vfmsac_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
   return vfmsac_vv_f64m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsac.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -203,7 +185,6 @@ vfloat64m8_t test_vfmsac_vf_f64m8(vfloat64m8_t acc, double op1,
   return vfmsac_vf_f64m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -215,7 +196,6 @@ vfloat32mf2_t test_vfmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
   return vfmsac_vv_f32mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsac.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -226,7 +206,6 @@ vfloat32mf2_t test_vfmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
   return vfmsac_vf_f32mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsac.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -238,7 +217,6 @@ vfloat32m1_t test_vfmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
   return vfmsac_vv_f32m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsac.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -249,7 +227,6 @@ vfloat32m1_t test_vfmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, float op1,
   return vfmsac_vf_f32m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsac.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -261,7 +238,6 @@ vfloat32m2_t test_vfmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
   return vfmsac_vv_f32m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsac.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -272,7 +248,6 @@ vfloat32m2_t test_vfmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1,
   return vfmsac_vf_f32m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsac.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -284,7 +259,6 @@ vfloat32m4_t test_vfmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
   return vfmsac_vv_f32m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsac.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -295,7 +269,6 @@ vfloat32m4_t test_vfmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
   return vfmsac_vf_f32m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsac.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -307,7 +280,6 @@ vfloat32m8_t test_vfmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
   return vfmsac_vv_f32m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsac.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -318,7 +290,6 @@ vfloat32m8_t test_vfmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
   return vfmsac_vf_f32m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsac.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -330,7 +301,6 @@ vfloat64m1_t test_vfmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
   return vfmsac_vv_f64m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsac.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -341,7 +311,6 @@ vfloat64m1_t test_vfmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
   return vfmsac_vf_f64m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsac.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -353,7 +322,6 @@ vfloat64m2_t test_vfmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
   return vfmsac_vv_f64m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsac.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -364,7 +332,6 @@ vfloat64m2_t test_vfmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
   return vfmsac_vf_f64m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsac.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -376,7 +343,6 @@ vfloat64m4_t test_vfmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
   return vfmsac_vv_f64m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsac.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -387,7 +353,6 @@ vfloat64m4_t test_vfmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
   return vfmsac_vf_f64m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsac.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -399,7 +364,6 @@ vfloat64m8_t test_vfmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
   return vfmsac_vv_f64m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsac.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsub.c
index 462a6d75eb7a3..0266d5168d82c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsub.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vfloat32mf2_t test_vfmsub_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
   return vfmsub_vv_f32mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -27,7 +25,6 @@ vfloat32mf2_t test_vfmsub_vf_f32mf2(vfloat32mf2_t acc, float op1,
   return vfmsub_vf_f32mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsub.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -38,7 +35,6 @@ vfloat32m1_t test_vfmsub_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
   return vfmsub_vv_f32m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsub.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -49,7 +45,6 @@ vfloat32m1_t test_vfmsub_vf_f32m1(vfloat32m1_t acc, float op1, vfloat32m1_t op2,
   return vfmsub_vf_f32m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsub.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -60,7 +55,6 @@ vfloat32m2_t test_vfmsub_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
   return vfmsub_vv_f32m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsub.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -71,7 +65,6 @@ vfloat32m2_t test_vfmsub_vf_f32m2(vfloat32m2_t acc, float op1, vfloat32m2_t op2,
   return vfmsub_vf_f32m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsub.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -82,7 +75,6 @@ vfloat32m4_t test_vfmsub_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
   return vfmsub_vv_f32m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsub.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -93,7 +85,6 @@ vfloat32m4_t test_vfmsub_vf_f32m4(vfloat32m4_t acc, float op1, vfloat32m4_t op2,
   return vfmsub_vf_f32m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsub.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +95,6 @@ vfloat32m8_t test_vfmsub_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
   return vfmsub_vv_f32m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsub.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -115,7 +105,6 @@ vfloat32m8_t test_vfmsub_vf_f32m8(vfloat32m8_t acc, float op1, vfloat32m8_t op2,
   return vfmsub_vf_f32m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsub.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -126,7 +115,6 @@ vfloat64m1_t test_vfmsub_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
   return vfmsub_vv_f64m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsub.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -137,7 +125,6 @@ vfloat64m1_t test_vfmsub_vf_f64m1(vfloat64m1_t acc, double op1,
   return vfmsub_vf_f64m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsub.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -148,7 +135,6 @@ vfloat64m2_t test_vfmsub_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
   return vfmsub_vv_f64m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsub.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -159,7 +145,6 @@ vfloat64m2_t test_vfmsub_vf_f64m2(vfloat64m2_t acc, double op1,
   return vfmsub_vf_f64m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsub.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -170,7 +155,6 @@ vfloat64m4_t test_vfmsub_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
   return vfmsub_vv_f64m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsub.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -181,7 +165,6 @@ vfloat64m4_t test_vfmsub_vf_f64m4(vfloat64m4_t acc, double op1,
   return vfmsub_vf_f64m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsub.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -192,7 +175,6 @@ vfloat64m8_t test_vfmsub_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
   return vfmsub_vv_f64m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsub.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -203,7 +185,6 @@ vfloat64m8_t test_vfmsub_vf_f64m8(vfloat64m8_t acc, double op1,
   return vfmsub_vf_f64m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -215,7 +196,6 @@ vfloat32mf2_t test_vfmsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
   return vfmsub_vv_f32mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsub.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -226,7 +206,6 @@ vfloat32mf2_t test_vfmsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
   return vfmsub_vf_f32mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsub.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -238,7 +217,6 @@ vfloat32m1_t test_vfmsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
   return vfmsub_vv_f32m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsub.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -249,7 +227,6 @@ vfloat32m1_t test_vfmsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, float op1,
   return vfmsub_vf_f32m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsub.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -261,7 +238,6 @@ vfloat32m2_t test_vfmsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
   return vfmsub_vv_f32m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsub.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -272,7 +248,6 @@ vfloat32m2_t test_vfmsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1,
   return vfmsub_vf_f32m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsub.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -284,7 +259,6 @@ vfloat32m4_t test_vfmsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
   return vfmsub_vv_f32m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsub.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -295,7 +269,6 @@ vfloat32m4_t test_vfmsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
   return vfmsub_vf_f32m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsub.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -307,7 +280,6 @@ vfloat32m8_t test_vfmsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
   return vfmsub_vv_f32m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsub.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -318,7 +290,6 @@ vfloat32m8_t test_vfmsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
   return vfmsub_vf_f32m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsub.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -330,7 +301,6 @@ vfloat64m1_t test_vfmsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
   return vfmsub_vv_f64m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsub.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -341,7 +311,6 @@ vfloat64m1_t test_vfmsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
   return vfmsub_vf_f64m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsub.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -353,7 +322,6 @@ vfloat64m2_t test_vfmsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
   return vfmsub_vv_f64m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsub.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -364,7 +332,6 @@ vfloat64m2_t test_vfmsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
   return vfmsub_vf_f64m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsub.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -376,7 +343,6 @@ vfloat64m4_t test_vfmsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
   return vfmsub_vv_f64m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsub.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -387,7 +353,6 @@ vfloat64m4_t test_vfmsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
   return vfmsub_vf_f64m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsub.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -399,7 +364,6 @@ vfloat64m8_t test_vfmsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
   return vfmsub_vv_f64m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsub.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmul.c
index 0282387c68220..e02df8cab9d95 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmul.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfmul_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vfloat32mf2_t test_vfmul_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
   return vfmul_vv_f32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmul_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -26,7 +24,6 @@ vfloat32mf2_t test_vfmul_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
   return vfmul_vf_f32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmul_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -37,7 +34,6 @@ vfloat32m1_t test_vfmul_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
   return vfmul_vv_f32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmul_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -47,7 +43,6 @@ vfloat32m1_t test_vfmul_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
   return vfmul_vf_f32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmul_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -58,7 +53,6 @@ vfloat32m2_t test_vfmul_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
   return vfmul_vv_f32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmul_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -68,7 +62,6 @@ vfloat32m2_t test_vfmul_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
   return vfmul_vf_f32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmul_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -79,7 +72,6 @@ vfloat32m4_t test_vfmul_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
   return vfmul_vv_f32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmul_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -89,7 +81,6 @@ vfloat32m4_t test_vfmul_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
   return vfmul_vf_f32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmul_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -100,7 +91,6 @@ vfloat32m8_t test_vfmul_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
   return vfmul_vv_f32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmul_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -110,7 +100,6 @@ vfloat32m8_t test_vfmul_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
   return vfmul_vf_f32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmul_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -121,7 +110,6 @@ vfloat64m1_t test_vfmul_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
   return vfmul_vv_f64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmul_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -131,7 +119,6 @@ vfloat64m1_t test_vfmul_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
   return vfmul_vf_f64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmul_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -142,7 +129,6 @@ vfloat64m2_t test_vfmul_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
   return vfmul_vv_f64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmul_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -152,7 +138,6 @@ vfloat64m2_t test_vfmul_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
   return vfmul_vf_f64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmul_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -163,7 +148,6 @@ vfloat64m4_t test_vfmul_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
   return vfmul_vv_f64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmul_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -173,7 +157,6 @@ vfloat64m4_t test_vfmul_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
   return vfmul_vf_f64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmul_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -184,7 +167,6 @@ vfloat64m8_t test_vfmul_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
   return vfmul_vv_f64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmul_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -194,7 +176,6 @@ vfloat64m8_t test_vfmul_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
   return vfmul_vf_f64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmul_vv_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -206,7 +187,6 @@ vfloat32mf2_t test_vfmul_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
   return vfmul_vv_f32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmul_vf_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -217,7 +197,6 @@ vfloat32mf2_t test_vfmul_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
   return vfmul_vf_f32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmul_vv_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmul.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -229,7 +208,6 @@ vfloat32m1_t test_vfmul_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
   return vfmul_vv_f32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmul_vf_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmul.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -240,7 +218,6 @@ vfloat32m1_t test_vfmul_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
   return vfmul_vf_f32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmul_vv_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmul.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -252,7 +229,6 @@ vfloat32m2_t test_vfmul_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
   return vfmul_vv_f32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmul_vf_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmul.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -263,7 +239,6 @@ vfloat32m2_t test_vfmul_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
   return vfmul_vf_f32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmul_vv_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmul.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -275,7 +250,6 @@ vfloat32m4_t test_vfmul_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
   return vfmul_vv_f32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmul_vf_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmul.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -286,7 +260,6 @@ vfloat32m4_t test_vfmul_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
   return vfmul_vf_f32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmul_vv_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmul.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -298,7 +271,6 @@ vfloat32m8_t test_vfmul_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
   return vfmul_vv_f32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmul_vf_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmul.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -309,7 +281,6 @@ vfloat32m8_t test_vfmul_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
   return vfmul_vf_f32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmul_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -321,7 +292,6 @@ vfloat64m1_t test_vfmul_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
   return vfmul_vv_f64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmul_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -332,7 +302,6 @@ vfloat64m1_t test_vfmul_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
   return vfmul_vf_f64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmul_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -344,7 +313,6 @@ vfloat64m2_t test_vfmul_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
   return vfmul_vv_f64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmul_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -355,7 +323,6 @@ vfloat64m2_t test_vfmul_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
   return vfmul_vf_f64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmul_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -367,7 +334,6 @@ vfloat64m4_t test_vfmul_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
   return vfmul_vv_f64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmul_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -378,7 +344,6 @@ vfloat64m4_t test_vfmul_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
   return vfmul_vf_f64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmul_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -390,7 +355,6 @@ vfloat64m8_t test_vfmul_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
   return vfmul_vv_f64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmul_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmv.c
index b54cbc5462dcd..ab1d8b6a3f06d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmv.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfmv_v_f_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32.i64(float [[SRC:%.*]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vfloat32mf2_t test_vfmv_v_f_f32mf2(float src, size_t vl) {
   return vfmv_v_f_f32mf2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmv_v_f_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32.i64(float [[SRC:%.*]], i64 [[VL:%.*]])
@@ -25,7 +23,6 @@ vfloat32m1_t test_vfmv_v_f_f32m1(float src, size_t vl) {
   return vfmv_v_f_f32m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmv_v_f_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmv.v.f.nxv4f32.i64(float [[SRC:%.*]], i64 [[VL:%.*]])
@@ -35,7 +32,6 @@ vfloat32m2_t test_vfmv_v_f_f32m2(float src, size_t vl) {
   return vfmv_v_f_f32m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmv_v_f_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmv.v.f.nxv8f32.i64(float [[SRC:%.*]], i64 [[VL:%.*]])
@@ -45,7 +41,6 @@ vfloat32m4_t test_vfmv_v_f_f32m4(float src, size_t vl) {
   return vfmv_v_f_f32m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmv_v_f_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmv.v.f.nxv16f32.i64(float [[SRC:%.*]], i64 [[VL:%.*]])
@@ -55,7 +50,6 @@ vfloat32m8_t test_vfmv_v_f_f32m8(float src, size_t vl) {
   return vfmv_v_f_f32m8(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmv_v_f_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64.i64(double [[SRC:%.*]], i64 [[VL:%.*]])
@@ -65,7 +59,6 @@ vfloat64m1_t test_vfmv_v_f_f64m1(double src, size_t vl) {
   return vfmv_v_f_f64m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmv_v_f_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmv.v.f.nxv2f64.i64(double [[SRC:%.*]], i64 [[VL:%.*]])
@@ -75,7 +68,6 @@ vfloat64m2_t test_vfmv_v_f_f64m2(double src, size_t vl) {
   return vfmv_v_f_f64m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmv_v_f_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmv.v.f.nxv4f64.i64(double [[SRC:%.*]], i64 [[VL:%.*]])
@@ -85,7 +77,6 @@ vfloat64m4_t test_vfmv_v_f_f64m4(double src, size_t vl) {
   return vfmv_v_f_f64m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmv_v_f_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64.i64(double [[SRC:%.*]], i64 [[VL:%.*]])
@@ -95,7 +86,6 @@ vfloat64m8_t test_vfmv_v_f_f64m8(double src, size_t vl) {
   return vfmv_v_f_f64m8(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmv_f_s_f32mf2_f32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call float @llvm.riscv.vfmv.f.s.nxv1f32(<vscale x 1 x float> [[SRC:%.*]])
@@ -105,7 +95,6 @@ float test_vfmv_f_s_f32mf2_f32(vfloat32mf2_t src) {
   return vfmv_f_s_f32mf2_f32(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmv_s_f_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmv.s.f.nxv1f32.i64(<vscale x 1 x float> [[DST:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]])
@@ -115,7 +104,6 @@ vfloat32mf2_t test_vfmv_s_f_f32mf2(vfloat32mf2_t dst, float src, size_t vl) {
   return vfmv_s_f_f32mf2(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmv_f_s_f32m1_f32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call float @llvm.riscv.vfmv.f.s.nxv2f32(<vscale x 2 x float> [[SRC:%.*]])
@@ -125,7 +113,6 @@ float test_vfmv_f_s_f32m1_f32(vfloat32m1_t src) {
   return vfmv_f_s_f32m1_f32(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmv_s_f_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmv.s.f.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]])
@@ -135,7 +122,6 @@ vfloat32m1_t test_vfmv_s_f_f32m1(vfloat32m1_t dst, float src, size_t vl) {
   return vfmv_s_f_f32m1(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmv_f_s_f32m2_f32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call float @llvm.riscv.vfmv.f.s.nxv4f32(<vscale x 4 x float> [[SRC:%.*]])
@@ -145,7 +131,6 @@ float test_vfmv_f_s_f32m2_f32(vfloat32m2_t src) {
   return vfmv_f_s_f32m2_f32(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmv_s_f_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmv.s.f.nxv4f32.i64(<vscale x 4 x float> [[DST:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]])
@@ -155,7 +140,6 @@ vfloat32m2_t test_vfmv_s_f_f32m2(vfloat32m2_t dst, float src, size_t vl) {
   return vfmv_s_f_f32m2(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmv_f_s_f32m4_f32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call float @llvm.riscv.vfmv.f.s.nxv8f32(<vscale x 8 x float> [[SRC:%.*]])
@@ -165,7 +149,6 @@ float test_vfmv_f_s_f32m4_f32(vfloat32m4_t src) {
   return vfmv_f_s_f32m4_f32(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmv_s_f_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmv.s.f.nxv8f32.i64(<vscale x 8 x float> [[DST:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]])
@@ -175,7 +158,6 @@ vfloat32m4_t test_vfmv_s_f_f32m4(vfloat32m4_t dst, float src, size_t vl) {
   return vfmv_s_f_f32m4(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmv_f_s_f32m8_f32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call float @llvm.riscv.vfmv.f.s.nxv16f32(<vscale x 16 x float> [[SRC:%.*]])
@@ -185,7 +167,6 @@ float test_vfmv_f_s_f32m8_f32(vfloat32m8_t src) {
   return vfmv_f_s_f32m8_f32(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmv_s_f_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmv.s.f.nxv16f32.i64(<vscale x 16 x float> [[DST:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]])
@@ -195,7 +176,6 @@ vfloat32m8_t test_vfmv_s_f_f32m8(vfloat32m8_t dst, float src, size_t vl) {
   return vfmv_s_f_f32m8(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmv_f_s_f64m1_f64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call double @llvm.riscv.vfmv.f.s.nxv1f64(<vscale x 1 x double> [[SRC:%.*]])
@@ -205,7 +185,6 @@ double test_vfmv_f_s_f64m1_f64(vfloat64m1_t src) {
   return vfmv_f_s_f64m1_f64(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmv_s_f_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmv.s.f.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]])
@@ -215,7 +194,6 @@ vfloat64m1_t test_vfmv_s_f_f64m1(vfloat64m1_t dst, double src, size_t vl) {
   return vfmv_s_f_f64m1(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmv_f_s_f64m2_f64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call double @llvm.riscv.vfmv.f.s.nxv2f64(<vscale x 2 x double> [[SRC:%.*]])
@@ -225,7 +203,6 @@ double test_vfmv_f_s_f64m2_f64(vfloat64m2_t src) {
   return vfmv_f_s_f64m2_f64(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmv_s_f_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmv.s.f.nxv2f64.i64(<vscale x 2 x double> [[DST:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]])
@@ -235,7 +212,6 @@ vfloat64m2_t test_vfmv_s_f_f64m2(vfloat64m2_t dst, double src, size_t vl) {
   return vfmv_s_f_f64m2(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmv_f_s_f64m4_f64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call double @llvm.riscv.vfmv.f.s.nxv4f64(<vscale x 4 x double> [[SRC:%.*]])
@@ -245,7 +221,6 @@ double test_vfmv_f_s_f64m4_f64(vfloat64m4_t src) {
   return vfmv_f_s_f64m4_f64(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmv_s_f_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmv.s.f.nxv4f64.i64(<vscale x 4 x double> [[DST:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]])
@@ -255,7 +230,6 @@ vfloat64m4_t test_vfmv_s_f_f64m4(vfloat64m4_t dst, double src, size_t vl) {
   return vfmv_s_f_f64m4(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmv_f_s_f64m8_f64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call double @llvm.riscv.vfmv.f.s.nxv8f64(<vscale x 8 x double> [[SRC:%.*]])
@@ -265,7 +239,6 @@ double test_vfmv_f_s_f64m8_f64(vfloat64m8_t src) {
   return vfmv_f_s_f64m8_f64(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmv_s_f_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmv.s.f.nxv8f64.i64(<vscale x 8 x double> [[DST:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfncvt.c
index 445042c1a26be..bda61e684acf0 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfncvt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfncvt.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vint16mf4_t test_vfncvt_x_f_w_i16mf4(vfloat32mf2_t src, size_t vl) {
   return vfncvt_x_f_w_i16mf4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -25,7 +23,6 @@ vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4(vfloat32mf2_t src, size_t vl) {
   return vfncvt_rtz_x_f_w_i16mf4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -35,7 +32,6 @@ vint16mf2_t test_vfncvt_x_f_w_i16mf2(vfloat32m1_t src, size_t vl) {
   return vfncvt_x_f_w_i16mf2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -45,7 +41,6 @@ vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2(vfloat32m1_t src, size_t vl) {
   return vfncvt_rtz_x_f_w_i16mf2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -55,7 +50,6 @@ vint16m1_t test_vfncvt_x_f_w_i16m1(vfloat32m2_t src, size_t vl) {
   return vfncvt_x_f_w_i16m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -65,7 +59,6 @@ vint16m1_t test_vfncvt_rtz_x_f_w_i16m1(vfloat32m2_t src, size_t vl) {
   return vfncvt_rtz_x_f_w_i16m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -75,7 +68,6 @@ vint16m2_t test_vfncvt_x_f_w_i16m2(vfloat32m4_t src, size_t vl) {
   return vfncvt_x_f_w_i16m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -85,7 +77,6 @@ vint16m2_t test_vfncvt_rtz_x_f_w_i16m2(vfloat32m4_t src, size_t vl) {
   return vfncvt_rtz_x_f_w_i16m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -95,7 +86,6 @@ vint16m4_t test_vfncvt_x_f_w_i16m4(vfloat32m8_t src, size_t vl) {
   return vfncvt_x_f_w_i16m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -105,7 +95,6 @@ vint16m4_t test_vfncvt_rtz_x_f_w_i16m4(vfloat32m8_t src, size_t vl) {
   return vfncvt_rtz_x_f_w_i16m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -115,7 +104,6 @@ vuint16mf4_t test_vfncvt_xu_f_w_u16mf4(vfloat32mf2_t src, size_t vl) {
   return vfncvt_xu_f_w_u16mf4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -125,7 +113,6 @@ vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4(vfloat32mf2_t src, size_t vl) {
   return vfncvt_rtz_xu_f_w_u16mf4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -135,7 +122,6 @@ vuint16mf2_t test_vfncvt_xu_f_w_u16mf2(vfloat32m1_t src, size_t vl) {
   return vfncvt_xu_f_w_u16mf2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -145,7 +131,6 @@ vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2(vfloat32m1_t src, size_t vl) {
   return vfncvt_rtz_xu_f_w_u16mf2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -155,7 +140,6 @@ vuint16m1_t test_vfncvt_xu_f_w_u16m1(vfloat32m2_t src, size_t vl) {
   return vfncvt_xu_f_w_u16m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -165,7 +149,6 @@ vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1(vfloat32m2_t src, size_t vl) {
   return vfncvt_rtz_xu_f_w_u16m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -175,7 +158,6 @@ vuint16m2_t test_vfncvt_xu_f_w_u16m2(vfloat32m4_t src, size_t vl) {
   return vfncvt_xu_f_w_u16m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -185,7 +167,6 @@ vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2(vfloat32m4_t src, size_t vl) {
   return vfncvt_rtz_xu_f_w_u16m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -195,7 +176,6 @@ vuint16m4_t test_vfncvt_xu_f_w_u16m4(vfloat32m8_t src, size_t vl) {
   return vfncvt_xu_f_w_u16m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -205,7 +185,6 @@ vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4(vfloat32m8_t src, size_t vl) {
   return vfncvt_rtz_xu_f_w_u16m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -215,7 +194,6 @@ vint32mf2_t test_vfncvt_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) {
   return vfncvt_x_f_w_i32mf2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -225,7 +203,6 @@ vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) {
   return vfncvt_rtz_x_f_w_i32mf2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -235,7 +212,6 @@ vint32m1_t test_vfncvt_x_f_w_i32m1(vfloat64m2_t src, size_t vl) {
   return vfncvt_x_f_w_i32m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -245,7 +221,6 @@ vint32m1_t test_vfncvt_rtz_x_f_w_i32m1(vfloat64m2_t src, size_t vl) {
   return vfncvt_rtz_x_f_w_i32m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -255,7 +230,6 @@ vint32m2_t test_vfncvt_x_f_w_i32m2(vfloat64m4_t src, size_t vl) {
   return vfncvt_x_f_w_i32m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -265,7 +239,6 @@ vint32m2_t test_vfncvt_rtz_x_f_w_i32m2(vfloat64m4_t src, size_t vl) {
   return vfncvt_rtz_x_f_w_i32m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -275,7 +248,6 @@ vint32m4_t test_vfncvt_x_f_w_i32m4(vfloat64m8_t src, size_t vl) {
   return vfncvt_x_f_w_i32m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -285,7 +257,6 @@ vint32m4_t test_vfncvt_rtz_x_f_w_i32m4(vfloat64m8_t src, size_t vl) {
   return vfncvt_rtz_x_f_w_i32m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -295,7 +266,6 @@ vuint32mf2_t test_vfncvt_xu_f_w_u32mf2(vfloat64m1_t src, size_t vl) {
   return vfncvt_xu_f_w_u32mf2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -305,7 +275,6 @@ vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2(vfloat64m1_t src, size_t vl) {
   return vfncvt_rtz_xu_f_w_u32mf2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -315,7 +284,6 @@ vuint32m1_t test_vfncvt_xu_f_w_u32m1(vfloat64m2_t src, size_t vl) {
   return vfncvt_xu_f_w_u32m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -325,7 +293,6 @@ vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1(vfloat64m2_t src, size_t vl) {
   return vfncvt_rtz_xu_f_w_u32m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -335,7 +302,6 @@ vuint32m2_t test_vfncvt_xu_f_w_u32m2(vfloat64m4_t src, size_t vl) {
   return vfncvt_xu_f_w_u32m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -345,7 +311,6 @@ vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2(vfloat64m4_t src, size_t vl) {
   return vfncvt_rtz_xu_f_w_u32m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -355,7 +320,6 @@ vuint32m4_t test_vfncvt_xu_f_w_u32m4(vfloat64m8_t src, size_t vl) {
   return vfncvt_xu_f_w_u32m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -365,7 +329,6 @@ vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4(vfloat64m8_t src, size_t vl) {
   return vfncvt_rtz_xu_f_w_u32m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.x.w.nxv1f32.nxv1i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -375,7 +338,6 @@ vfloat32mf2_t test_vfncvt_f_x_w_f32mf2(vint64m1_t src, size_t vl) {
   return vfncvt_f_x_w_f32mf2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.x.w.nxv2f32.nxv2i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -385,7 +347,6 @@ vfloat32m1_t test_vfncvt_f_x_w_f32m1(vint64m2_t src, size_t vl) {
   return vfncvt_f_x_w_f32m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.x.w.nxv4f32.nxv4i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -395,7 +356,6 @@ vfloat32m2_t test_vfncvt_f_x_w_f32m2(vint64m4_t src, size_t vl) {
   return vfncvt_f_x_w_f32m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.x.w.nxv8f32.nxv8i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -405,7 +365,6 @@ vfloat32m4_t test_vfncvt_f_x_w_f32m4(vint64m8_t src, size_t vl) {
   return vfncvt_f_x_w_f32m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.xu.w.nxv1f32.nxv1i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -415,7 +374,6 @@ vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2(vuint64m1_t src, size_t vl) {
   return vfncvt_f_xu_w_f32mf2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -425,7 +383,6 @@ vfloat32m1_t test_vfncvt_f_xu_w_f32m1(vuint64m2_t src, size_t vl) {
   return vfncvt_f_xu_w_f32m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -435,7 +392,6 @@ vfloat32m2_t test_vfncvt_f_xu_w_f32m2(vuint64m4_t src, size_t vl) {
   return vfncvt_f_xu_w_f32m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -445,7 +401,6 @@ vfloat32m4_t test_vfncvt_f_xu_w_f32m4(vuint64m8_t src, size_t vl) {
   return vfncvt_f_xu_w_f32m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.f.w.nxv1f32.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -455,7 +410,6 @@ vfloat32mf2_t test_vfncvt_f_f_w_f32mf2(vfloat64m1_t src, size_t vl) {
   return vfncvt_f_f_w_f32mf2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -465,7 +419,6 @@ vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2(vfloat64m1_t src, size_t vl) {
   return vfncvt_rod_f_f_w_f32mf2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -475,7 +428,6 @@ vfloat32m1_t test_vfncvt_f_f_w_f32m1(vfloat64m2_t src, size_t vl) {
   return vfncvt_f_f_w_f32m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -485,7 +437,6 @@ vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1(vfloat64m2_t src, size_t vl) {
   return vfncvt_rod_f_f_w_f32m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -495,7 +446,6 @@ vfloat32m2_t test_vfncvt_f_f_w_f32m2(vfloat64m4_t src, size_t vl) {
   return vfncvt_f_f_w_f32m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -505,7 +455,6 @@ vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2(vfloat64m4_t src, size_t vl) {
   return vfncvt_rod_f_f_w_f32m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -515,7 +464,6 @@ vfloat32m4_t test_vfncvt_f_f_w_f32m4(vfloat64m8_t src, size_t vl) {
   return vfncvt_f_f_w_f32m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -525,7 +473,6 @@ vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4(vfloat64m8_t src, size_t vl) {
   return vfncvt_rod_f_f_w_f32m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -536,7 +483,6 @@ vint16mf4_t test_vfncvt_x_f_w_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
   return vfncvt_x_f_w_i16mf4_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -548,7 +494,6 @@ vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_m(vbool64_t mask,
   return vfncvt_rtz_x_f_w_i16mf4_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i16.nxv2f32.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -559,7 +504,6 @@ vint16mf2_t test_vfncvt_x_f_w_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
   return vfncvt_x_f_w_i16mf2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -571,7 +515,6 @@ vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_m(vbool32_t mask,
   return vfncvt_rtz_x_f_w_i16mf2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i16.nxv4f32.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -582,7 +525,6 @@ vint16m1_t test_vfncvt_x_f_w_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
   return vfncvt_x_f_w_i16m1_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -593,7 +535,6 @@ vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
   return vfncvt_rtz_x_f_w_i16m1_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i16.nxv8f32.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -604,7 +545,6 @@ vint16m2_t test_vfncvt_x_f_w_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
   return vfncvt_x_f_w_i16m2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -615,7 +555,6 @@ vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
   return vfncvt_rtz_x_f_w_i16m2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i16.nxv16f32.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -626,7 +565,6 @@ vint16m4_t test_vfncvt_x_f_w_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
   return vfncvt_x_f_w_i16m4_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -637,7 +575,6 @@ vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
   return vfncvt_rtz_x_f_w_i16m4_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -648,7 +585,6 @@ vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
   return vfncvt_xu_f_w_u16mf4_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -660,7 +596,6 @@ vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_m(vbool64_t mask,
   return vfncvt_rtz_xu_f_w_u16mf4_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -671,7 +606,6 @@ vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
   return vfncvt_xu_f_w_u16mf2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -683,7 +617,6 @@ vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_m(vbool32_t mask,
   return vfncvt_rtz_xu_f_w_u16mf2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -694,7 +627,6 @@ vuint16m1_t test_vfncvt_xu_f_w_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
   return vfncvt_xu_f_w_u16m1_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -706,7 +638,6 @@ vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_m(vbool16_t mask,
   return vfncvt_rtz_xu_f_w_u16m1_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -717,7 +648,6 @@ vuint16m2_t test_vfncvt_xu_f_w_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
   return vfncvt_xu_f_w_u16m2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -728,7 +658,6 @@ vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
   return vfncvt_rtz_xu_f_w_u16m2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -739,7 +668,6 @@ vuint16m4_t test_vfncvt_xu_f_w_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
   return vfncvt_xu_f_w_u16m4_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -750,7 +678,6 @@ vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
   return vfncvt_rtz_xu_f_w_u16m4_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i32.nxv1f64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -761,7 +688,6 @@ vint32mf2_t test_vfncvt_x_f_w_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
   return vfncvt_x_f_w_i32mf2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -773,7 +699,6 @@ vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_m(vbool64_t mask,
   return vfncvt_rtz_x_f_w_i32mf2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i32.nxv2f64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -784,7 +709,6 @@ vint32m1_t test_vfncvt_x_f_w_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
   return vfncvt_x_f_w_i32m1_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -795,7 +719,6 @@ vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
   return vfncvt_rtz_x_f_w_i32m1_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i32.nxv4f64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -806,7 +729,6 @@ vint32m2_t test_vfncvt_x_f_w_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
   return vfncvt_x_f_w_i32m2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -817,7 +739,6 @@ vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
   return vfncvt_rtz_x_f_w_i32m2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i32.nxv8f64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -828,7 +749,6 @@ vint32m4_t test_vfncvt_x_f_w_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
   return vfncvt_x_f_w_i32m4_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -839,7 +759,6 @@ vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
   return vfncvt_rtz_x_f_w_i32m4_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -850,7 +769,6 @@ vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
   return vfncvt_xu_f_w_u32mf2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -862,7 +780,6 @@ vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_m(vbool64_t mask,
   return vfncvt_rtz_xu_f_w_u32mf2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -873,7 +790,6 @@ vuint32m1_t test_vfncvt_xu_f_w_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
   return vfncvt_xu_f_w_u32m1_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -885,7 +801,6 @@ vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_m(vbool32_t mask,
   return vfncvt_rtz_xu_f_w_u32m1_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -896,7 +811,6 @@ vuint32m2_t test_vfncvt_xu_f_w_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
   return vfncvt_xu_f_w_u32m2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -908,7 +822,6 @@ vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_m(vbool16_t mask,
   return vfncvt_rtz_xu_f_w_u32m2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -919,7 +832,6 @@ vuint32m4_t test_vfncvt_xu_f_w_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
   return vfncvt_xu_f_w_u32m4_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -930,7 +842,6 @@ vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
   return vfncvt_rtz_xu_f_w_u32m4_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv1f32.nxv1i64.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -942,7 +853,6 @@ vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_m(vbool64_t mask,
   return vfncvt_f_x_w_f32mf2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv2f32.nxv2i64.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -953,7 +863,6 @@ vfloat32m1_t test_vfncvt_f_x_w_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
   return vfncvt_f_x_w_f32m1_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv4f32.nxv4i64.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -964,7 +873,6 @@ vfloat32m2_t test_vfncvt_f_x_w_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
   return vfncvt_f_x_w_f32m2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv8f32.nxv8i64.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -975,7 +883,6 @@ vfloat32m4_t test_vfncvt_f_x_w_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
   return vfncvt_f_x_w_f32m4_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f32.nxv1i64.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -987,7 +894,6 @@ vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_m(vbool64_t mask,
   return vfncvt_f_xu_w_f32mf2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f32.nxv2i64.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -998,7 +904,6 @@ vfloat32m1_t test_vfncvt_f_xu_w_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
   return vfncvt_f_xu_w_f32m1_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f32.nxv4i64.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1009,7 +914,6 @@ vfloat32m2_t test_vfncvt_f_xu_w_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
   return vfncvt_f_xu_w_f32m2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f32.nxv8i64.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1020,7 +924,6 @@ vfloat32m4_t test_vfncvt_f_xu_w_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
   return vfncvt_f_xu_w_f32m4_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv1f32.nxv1f64.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1032,7 +935,6 @@ vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_m(vbool64_t mask,
   return vfncvt_f_f_w_f32mf2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1044,7 +946,6 @@ vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_m(vbool64_t mask,
   return vfncvt_rod_f_f_w_f32mf2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv2f32.nxv2f64.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1055,7 +956,6 @@ vfloat32m1_t test_vfncvt_f_f_w_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
   return vfncvt_f_f_w_f32m1_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1067,7 +967,6 @@ vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_m(vbool32_t mask,
   return vfncvt_rod_f_f_w_f32m1_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv4f32.nxv4f64.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1078,7 +977,6 @@ vfloat32m2_t test_vfncvt_f_f_w_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
   return vfncvt_f_f_w_f32m2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1090,7 +988,6 @@ vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_m(vbool16_t mask,
   return vfncvt_rod_f_f_w_f32m2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv8f32.nxv8f64.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1101,7 +998,6 @@ vfloat32m4_t test_vfncvt_f_f_w_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
   return vfncvt_f_f_w_f32m4_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfneg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfneg.c
index 6cd099b672746..98f37bad66aa4 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfneg.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfneg.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfneg_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vfloat32mf2_t test_vfneg_v_f32mf2 (vfloat32mf2_t op1, size_t vl) {
   return vfneg_v_f32mf2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfneg_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP1]], i64 [[VL:%.*]])
@@ -25,7 +23,6 @@ vfloat32m1_t test_vfneg_v_f32m1 (vfloat32m1_t op1, size_t vl) {
   return vfneg_v_f32m1(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfneg_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP1]], i64 [[VL:%.*]])
@@ -35,7 +32,6 @@ vfloat32m2_t test_vfneg_v_f32m2 (vfloat32m2_t op1, size_t vl) {
   return vfneg_v_f32m2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfneg_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP1]], i64 [[VL:%.*]])
@@ -45,7 +41,6 @@ vfloat32m4_t test_vfneg_v_f32m4 (vfloat32m4_t op1, size_t vl) {
   return vfneg_v_f32m4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfneg_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP1]], i64 [[VL:%.*]])
@@ -55,7 +50,6 @@ vfloat32m8_t test_vfneg_v_f32m8 (vfloat32m8_t op1, size_t vl) {
   return vfneg_v_f32m8(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfneg_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP1]], i64 [[VL:%.*]])
@@ -65,7 +59,6 @@ vfloat64m1_t test_vfneg_v_f64m1 (vfloat64m1_t op1, size_t vl) {
   return vfneg_v_f64m1(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfneg_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP1]], i64 [[VL:%.*]])
@@ -75,7 +68,6 @@ vfloat64m2_t test_vfneg_v_f64m2 (vfloat64m2_t op1, size_t vl) {
   return vfneg_v_f64m2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfneg_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP1]], i64 [[VL:%.*]])
@@ -85,7 +77,6 @@ vfloat64m4_t test_vfneg_v_f64m4 (vfloat64m4_t op1, size_t vl) {
   return vfneg_v_f64m4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfneg_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP1]], i64 [[VL:%.*]])
@@ -95,7 +86,6 @@ vfloat64m8_t test_vfneg_v_f64m8 (vfloat64m8_t op1, size_t vl) {
   return vfneg_v_f64m8(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfneg_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -105,7 +95,6 @@ vfloat32mf2_t test_vfneg_v_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vf
   return vfneg_v_f32mf2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfneg_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP1]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -115,7 +104,6 @@ vfloat32m1_t test_vfneg_v_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloa
   return vfneg_v_f32m1_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfneg_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP1]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -125,7 +113,6 @@ vfloat32m2_t test_vfneg_v_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloa
   return vfneg_v_f32m2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfneg_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP1]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -135,7 +122,6 @@ vfloat32m4_t test_vfneg_v_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat
   return vfneg_v_f32m4_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfneg_v_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP1]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -145,7 +131,6 @@ vfloat32m8_t test_vfneg_v_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat
   return vfneg_v_f32m8_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfneg_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP1]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -155,7 +140,6 @@ vfloat64m1_t test_vfneg_v_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloa
   return vfneg_v_f64m1_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfneg_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP1]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -165,7 +149,6 @@ vfloat64m2_t test_vfneg_v_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloa
   return vfneg_v_f64m2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfneg_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP1]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -175,7 +158,6 @@ vfloat64m4_t test_vfneg_v_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vfloa
   return vfneg_v_f64m4_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfneg_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP1]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmacc.c
index d6dd99cc49ace..af9cdbc07328d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmacc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmacc.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmacc.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vfloat32mf2_t test_vfnmacc_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
   return vfnmacc_vv_f32mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmacc.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -27,7 +25,6 @@ vfloat32mf2_t test_vfnmacc_vf_f32mf2(vfloat32mf2_t acc, float op1,
   return vfnmacc_vf_f32mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmacc.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -38,7 +35,6 @@ vfloat32m1_t test_vfnmacc_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
   return vfnmacc_vv_f32m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmacc.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -49,7 +45,6 @@ vfloat32m1_t test_vfnmacc_vf_f32m1(vfloat32m1_t acc, float op1,
   return vfnmacc_vf_f32m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmacc.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -60,7 +55,6 @@ vfloat32m2_t test_vfnmacc_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
   return vfnmacc_vv_f32m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmacc.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -71,7 +65,6 @@ vfloat32m2_t test_vfnmacc_vf_f32m2(vfloat32m2_t acc, float op1,
   return vfnmacc_vf_f32m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmacc.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -82,7 +75,6 @@ vfloat32m4_t test_vfnmacc_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
   return vfnmacc_vv_f32m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmacc.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -93,7 +85,6 @@ vfloat32m4_t test_vfnmacc_vf_f32m4(vfloat32m4_t acc, float op1,
   return vfnmacc_vf_f32m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmacc.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +95,6 @@ vfloat32m8_t test_vfnmacc_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
   return vfnmacc_vv_f32m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmacc.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -115,7 +105,6 @@ vfloat32m8_t test_vfnmacc_vf_f32m8(vfloat32m8_t acc, float op1,
   return vfnmacc_vf_f32m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmacc.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -126,7 +115,6 @@ vfloat64m1_t test_vfnmacc_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
   return vfnmacc_vv_f64m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmacc.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -137,7 +125,6 @@ vfloat64m1_t test_vfnmacc_vf_f64m1(vfloat64m1_t acc, double op1,
   return vfnmacc_vf_f64m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmacc.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -148,7 +135,6 @@ vfloat64m2_t test_vfnmacc_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
   return vfnmacc_vv_f64m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmacc.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -159,7 +145,6 @@ vfloat64m2_t test_vfnmacc_vf_f64m2(vfloat64m2_t acc, double op1,
   return vfnmacc_vf_f64m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmacc.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -170,7 +155,6 @@ vfloat64m4_t test_vfnmacc_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
   return vfnmacc_vv_f64m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmacc.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -181,7 +165,6 @@ vfloat64m4_t test_vfnmacc_vf_f64m4(vfloat64m4_t acc, double op1,
   return vfnmacc_vf_f64m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmacc.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -192,7 +175,6 @@ vfloat64m8_t test_vfnmacc_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
   return vfnmacc_vv_f64m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmacc.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -203,7 +185,6 @@ vfloat64m8_t test_vfnmacc_vf_f64m8(vfloat64m8_t acc, double op1,
   return vfnmacc_vf_f64m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -215,7 +196,6 @@ vfloat32mf2_t test_vfnmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
   return vfnmacc_vv_f32mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmacc.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -227,7 +207,6 @@ vfloat32mf2_t test_vfnmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
   return vfnmacc_vf_f32mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmacc.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -239,7 +218,6 @@ vfloat32m1_t test_vfnmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
   return vfnmacc_vv_f32m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmacc.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -250,7 +228,6 @@ vfloat32m1_t test_vfnmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
   return vfnmacc_vf_f32m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmacc.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -262,7 +239,6 @@ vfloat32m2_t test_vfnmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
   return vfnmacc_vv_f32m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmacc.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -273,7 +249,6 @@ vfloat32m2_t test_vfnmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
   return vfnmacc_vf_f32m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmacc.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -285,7 +260,6 @@ vfloat32m4_t test_vfnmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
   return vfnmacc_vv_f32m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmacc.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -296,7 +270,6 @@ vfloat32m4_t test_vfnmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
   return vfnmacc_vf_f32m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmacc.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -308,7 +281,6 @@ vfloat32m8_t test_vfnmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
   return vfnmacc_vv_f32m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmacc.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -319,7 +291,6 @@ vfloat32m8_t test_vfnmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
   return vfnmacc_vf_f32m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmacc.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -331,7 +302,6 @@ vfloat64m1_t test_vfnmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
   return vfnmacc_vv_f64m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmacc.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -342,7 +312,6 @@ vfloat64m1_t test_vfnmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
   return vfnmacc_vf_f64m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmacc.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -354,7 +323,6 @@ vfloat64m2_t test_vfnmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
   return vfnmacc_vv_f64m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmacc.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -365,7 +333,6 @@ vfloat64m2_t test_vfnmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
   return vfnmacc_vf_f64m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmacc.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -377,7 +344,6 @@ vfloat64m4_t test_vfnmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
   return vfnmacc_vv_f64m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmacc.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -388,7 +354,6 @@ vfloat64m4_t test_vfnmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
   return vfnmacc_vf_f64m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmacc.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -400,7 +365,6 @@ vfloat64m8_t test_vfnmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
   return vfnmacc_vv_f64m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmacc.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmadd.c
index e566f15113303..b9e76dcafe75d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmadd.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmadd.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vfloat32mf2_t test_vfnmadd_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
   return vfnmadd_vv_f32mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmadd.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -27,7 +25,6 @@ vfloat32mf2_t test_vfnmadd_vf_f32mf2(vfloat32mf2_t acc, float op1,
   return vfnmadd_vf_f32mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmadd.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -38,7 +35,6 @@ vfloat32m1_t test_vfnmadd_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
   return vfnmadd_vv_f32m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmadd.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -49,7 +45,6 @@ vfloat32m1_t test_vfnmadd_vf_f32m1(vfloat32m1_t acc, float op1,
   return vfnmadd_vf_f32m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmadd.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -60,7 +55,6 @@ vfloat32m2_t test_vfnmadd_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
   return vfnmadd_vv_f32m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmadd.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -71,7 +65,6 @@ vfloat32m2_t test_vfnmadd_vf_f32m2(vfloat32m2_t acc, float op1,
   return vfnmadd_vf_f32m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmadd.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -82,7 +75,6 @@ vfloat32m4_t test_vfnmadd_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
   return vfnmadd_vv_f32m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmadd.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -93,7 +85,6 @@ vfloat32m4_t test_vfnmadd_vf_f32m4(vfloat32m4_t acc, float op1,
   return vfnmadd_vf_f32m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmadd.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +95,6 @@ vfloat32m8_t test_vfnmadd_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
   return vfnmadd_vv_f32m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmadd.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -115,7 +105,6 @@ vfloat32m8_t test_vfnmadd_vf_f32m8(vfloat32m8_t acc, float op1,
   return vfnmadd_vf_f32m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmadd.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -126,7 +115,6 @@ vfloat64m1_t test_vfnmadd_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
   return vfnmadd_vv_f64m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmadd.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -137,7 +125,6 @@ vfloat64m1_t test_vfnmadd_vf_f64m1(vfloat64m1_t acc, double op1,
   return vfnmadd_vf_f64m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmadd.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -148,7 +135,6 @@ vfloat64m2_t test_vfnmadd_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
   return vfnmadd_vv_f64m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmadd.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -159,7 +145,6 @@ vfloat64m2_t test_vfnmadd_vf_f64m2(vfloat64m2_t acc, double op1,
   return vfnmadd_vf_f64m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmadd.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -170,7 +155,6 @@ vfloat64m4_t test_vfnmadd_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
   return vfnmadd_vv_f64m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmadd.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -181,7 +165,6 @@ vfloat64m4_t test_vfnmadd_vf_f64m4(vfloat64m4_t acc, double op1,
   return vfnmadd_vf_f64m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmadd.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -192,7 +175,6 @@ vfloat64m8_t test_vfnmadd_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
   return vfnmadd_vv_f64m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmadd.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -203,7 +185,6 @@ vfloat64m8_t test_vfnmadd_vf_f64m8(vfloat64m8_t acc, double op1,
   return vfnmadd_vf_f64m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -215,7 +196,6 @@ vfloat32mf2_t test_vfnmadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
   return vfnmadd_vv_f32mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmadd.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -227,7 +207,6 @@ vfloat32mf2_t test_vfnmadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
   return vfnmadd_vf_f32mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmadd.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -239,7 +218,6 @@ vfloat32m1_t test_vfnmadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
   return vfnmadd_vv_f32m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmadd.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -250,7 +228,6 @@ vfloat32m1_t test_vfnmadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
   return vfnmadd_vf_f32m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmadd.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -262,7 +239,6 @@ vfloat32m2_t test_vfnmadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
   return vfnmadd_vv_f32m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmadd.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -273,7 +249,6 @@ vfloat32m2_t test_vfnmadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
   return vfnmadd_vf_f32m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmadd.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -285,7 +260,6 @@ vfloat32m4_t test_vfnmadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
   return vfnmadd_vv_f32m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmadd.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -296,7 +270,6 @@ vfloat32m4_t test_vfnmadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
   return vfnmadd_vf_f32m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -308,7 +281,6 @@ vfloat32m8_t test_vfnmadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
   return vfnmadd_vv_f32m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmadd.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -319,7 +291,6 @@ vfloat32m8_t test_vfnmadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
   return vfnmadd_vf_f32m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmadd.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -331,7 +302,6 @@ vfloat64m1_t test_vfnmadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
   return vfnmadd_vv_f64m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmadd.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -342,7 +312,6 @@ vfloat64m1_t test_vfnmadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
   return vfnmadd_vf_f64m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmadd.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -354,7 +323,6 @@ vfloat64m2_t test_vfnmadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
   return vfnmadd_vv_f64m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmadd.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -365,7 +333,6 @@ vfloat64m2_t test_vfnmadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
   return vfnmadd_vf_f64m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmadd.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -377,7 +344,6 @@ vfloat64m4_t test_vfnmadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
   return vfnmadd_vv_f64m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmadd.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -388,7 +354,6 @@ vfloat64m4_t test_vfnmadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
   return vfnmadd_vf_f64m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -400,7 +365,6 @@ vfloat64m8_t test_vfnmadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
   return vfnmadd_vv_f64m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmadd.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsac.c
index 981b0d1dff7d3..162726dd5e135 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsac.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsac.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vfloat32mf2_t test_vfnmsac_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
   return vfnmsac_vv_f32mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -27,7 +25,6 @@ vfloat32mf2_t test_vfnmsac_vf_f32mf2(vfloat32mf2_t acc, float op1,
   return vfnmsac_vf_f32mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsac.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -38,7 +35,6 @@ vfloat32m1_t test_vfnmsac_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
   return vfnmsac_vv_f32m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsac.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -49,7 +45,6 @@ vfloat32m1_t test_vfnmsac_vf_f32m1(vfloat32m1_t acc, float op1,
   return vfnmsac_vf_f32m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsac.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -60,7 +55,6 @@ vfloat32m2_t test_vfnmsac_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
   return vfnmsac_vv_f32m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsac.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -71,7 +65,6 @@ vfloat32m2_t test_vfnmsac_vf_f32m2(vfloat32m2_t acc, float op1,
   return vfnmsac_vf_f32m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsac.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -82,7 +75,6 @@ vfloat32m4_t test_vfnmsac_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
   return vfnmsac_vv_f32m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsac.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -93,7 +85,6 @@ vfloat32m4_t test_vfnmsac_vf_f32m4(vfloat32m4_t acc, float op1,
   return vfnmsac_vf_f32m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsac.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +95,6 @@ vfloat32m8_t test_vfnmsac_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
   return vfnmsac_vv_f32m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsac.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -115,7 +105,6 @@ vfloat32m8_t test_vfnmsac_vf_f32m8(vfloat32m8_t acc, float op1,
   return vfnmsac_vf_f32m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsac.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -126,7 +115,6 @@ vfloat64m1_t test_vfnmsac_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
   return vfnmsac_vv_f64m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsac.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -137,7 +125,6 @@ vfloat64m1_t test_vfnmsac_vf_f64m1(vfloat64m1_t acc, double op1,
   return vfnmsac_vf_f64m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsac.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -148,7 +135,6 @@ vfloat64m2_t test_vfnmsac_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
   return vfnmsac_vv_f64m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsac.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -159,7 +145,6 @@ vfloat64m2_t test_vfnmsac_vf_f64m2(vfloat64m2_t acc, double op1,
   return vfnmsac_vf_f64m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsac.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -170,7 +155,6 @@ vfloat64m4_t test_vfnmsac_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
   return vfnmsac_vv_f64m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsac.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -181,7 +165,6 @@ vfloat64m4_t test_vfnmsac_vf_f64m4(vfloat64m4_t acc, double op1,
   return vfnmsac_vf_f64m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsac.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -192,7 +175,6 @@ vfloat64m8_t test_vfnmsac_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
   return vfnmsac_vv_f64m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsac.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -203,7 +185,6 @@ vfloat64m8_t test_vfnmsac_vf_f64m8(vfloat64m8_t acc, double op1,
   return vfnmsac_vf_f64m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -215,7 +196,6 @@ vfloat32mf2_t test_vfnmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
   return vfnmsac_vv_f32mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsac.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -227,7 +207,6 @@ vfloat32mf2_t test_vfnmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
   return vfnmsac_vf_f32mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsac.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -239,7 +218,6 @@ vfloat32m1_t test_vfnmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
   return vfnmsac_vv_f32m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsac.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -250,7 +228,6 @@ vfloat32m1_t test_vfnmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
   return vfnmsac_vf_f32m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsac.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -262,7 +239,6 @@ vfloat32m2_t test_vfnmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
   return vfnmsac_vv_f32m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsac.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -273,7 +249,6 @@ vfloat32m2_t test_vfnmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
   return vfnmsac_vf_f32m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsac.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -285,7 +260,6 @@ vfloat32m4_t test_vfnmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
   return vfnmsac_vv_f32m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsac.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -296,7 +270,6 @@ vfloat32m4_t test_vfnmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
   return vfnmsac_vf_f32m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsac.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -308,7 +281,6 @@ vfloat32m8_t test_vfnmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
   return vfnmsac_vv_f32m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsac.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -319,7 +291,6 @@ vfloat32m8_t test_vfnmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
   return vfnmsac_vf_f32m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsac.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -331,7 +302,6 @@ vfloat64m1_t test_vfnmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
   return vfnmsac_vv_f64m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsac.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -342,7 +312,6 @@ vfloat64m1_t test_vfnmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
   return vfnmsac_vf_f64m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsac.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -354,7 +323,6 @@ vfloat64m2_t test_vfnmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
   return vfnmsac_vv_f64m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsac.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -365,7 +333,6 @@ vfloat64m2_t test_vfnmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
   return vfnmsac_vf_f64m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsac.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -377,7 +344,6 @@ vfloat64m4_t test_vfnmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
   return vfnmsac_vv_f64m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsac.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -388,7 +354,6 @@ vfloat64m4_t test_vfnmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
   return vfnmsac_vf_f64m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsac.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -400,7 +365,6 @@ vfloat64m8_t test_vfnmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
   return vfnmsac_vv_f64m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsac.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsub.c
index b9ef6c5909d9a..8a0269efe414a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsub.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vfloat32mf2_t test_vfnmsub_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
   return vfnmsub_vv_f32mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -27,7 +25,6 @@ vfloat32mf2_t test_vfnmsub_vf_f32mf2(vfloat32mf2_t acc, float op1,
   return vfnmsub_vf_f32mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -38,7 +35,6 @@ vfloat32m1_t test_vfnmsub_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
   return vfnmsub_vv_f32m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -49,7 +45,6 @@ vfloat32m1_t test_vfnmsub_vf_f32m1(vfloat32m1_t acc, float op1,
   return vfnmsub_vf_f32m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -60,7 +55,6 @@ vfloat32m2_t test_vfnmsub_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
   return vfnmsub_vv_f32m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -71,7 +65,6 @@ vfloat32m2_t test_vfnmsub_vf_f32m2(vfloat32m2_t acc, float op1,
   return vfnmsub_vf_f32m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -82,7 +75,6 @@ vfloat32m4_t test_vfnmsub_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
   return vfnmsub_vv_f32m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -93,7 +85,6 @@ vfloat32m4_t test_vfnmsub_vf_f32m4(vfloat32m4_t acc, float op1,
   return vfnmsub_vf_f32m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsub.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +95,6 @@ vfloat32m8_t test_vfnmsub_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
   return vfnmsub_vv_f32m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsub.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -115,7 +105,6 @@ vfloat32m8_t test_vfnmsub_vf_f32m8(vfloat32m8_t acc, float op1,
   return vfnmsub_vf_f32m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsub.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -126,7 +115,6 @@ vfloat64m1_t test_vfnmsub_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
   return vfnmsub_vv_f64m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsub.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -137,7 +125,6 @@ vfloat64m1_t test_vfnmsub_vf_f64m1(vfloat64m1_t acc, double op1,
   return vfnmsub_vf_f64m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsub.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -148,7 +135,6 @@ vfloat64m2_t test_vfnmsub_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
   return vfnmsub_vv_f64m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsub.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -159,7 +145,6 @@ vfloat64m2_t test_vfnmsub_vf_f64m2(vfloat64m2_t acc, double op1,
   return vfnmsub_vf_f64m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsub.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -170,7 +155,6 @@ vfloat64m4_t test_vfnmsub_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
   return vfnmsub_vv_f64m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsub.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -181,7 +165,6 @@ vfloat64m4_t test_vfnmsub_vf_f64m4(vfloat64m4_t acc, double op1,
   return vfnmsub_vf_f64m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsub.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -192,7 +175,6 @@ vfloat64m8_t test_vfnmsub_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
   return vfnmsub_vv_f64m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsub.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -203,7 +185,6 @@ vfloat64m8_t test_vfnmsub_vf_f64m8(vfloat64m8_t acc, double op1,
   return vfnmsub_vf_f64m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -215,7 +196,6 @@ vfloat32mf2_t test_vfnmsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
   return vfnmsub_vv_f32mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -227,7 +207,6 @@ vfloat32mf2_t test_vfnmsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
   return vfnmsub_vf_f32mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -239,7 +218,6 @@ vfloat32m1_t test_vfnmsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
   return vfnmsub_vv_f32m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -250,7 +228,6 @@ vfloat32m1_t test_vfnmsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
   return vfnmsub_vf_f32m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -262,7 +239,6 @@ vfloat32m2_t test_vfnmsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
   return vfnmsub_vv_f32m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -273,7 +249,6 @@ vfloat32m2_t test_vfnmsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
   return vfnmsub_vf_f32m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -285,7 +260,6 @@ vfloat32m4_t test_vfnmsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
   return vfnmsub_vv_f32m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -296,7 +270,6 @@ vfloat32m4_t test_vfnmsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
   return vfnmsub_vf_f32m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsub.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -308,7 +281,6 @@ vfloat32m8_t test_vfnmsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
   return vfnmsub_vv_f32m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsub.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -319,7 +291,6 @@ vfloat32m8_t test_vfnmsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
   return vfnmsub_vf_f32m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsub.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -331,7 +302,6 @@ vfloat64m1_t test_vfnmsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
   return vfnmsub_vv_f64m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsub.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -342,7 +312,6 @@ vfloat64m1_t test_vfnmsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
   return vfnmsub_vf_f64m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsub.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -354,7 +323,6 @@ vfloat64m2_t test_vfnmsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
   return vfnmsub_vv_f64m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsub.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -365,7 +333,6 @@ vfloat64m2_t test_vfnmsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
   return vfnmsub_vf_f64m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsub.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -377,7 +344,6 @@ vfloat64m4_t test_vfnmsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
   return vfnmsub_vv_f64m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsub.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -388,7 +354,6 @@ vfloat64m4_t test_vfnmsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
   return vfnmsub_vf_f64m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsub.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -400,7 +365,6 @@ vfloat64m8_t test_vfnmsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
   return vfnmsub_vv_f64m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsub.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrdiv.c
index b58be76069eca..e3a67d320c94d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrdiv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrdiv.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrdiv.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vfloat32mf2_t test_vfrdiv_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
   return vfrdiv_vf_f32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrdiv.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -25,7 +23,6 @@ vfloat32m1_t test_vfrdiv_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
   return vfrdiv_vf_f32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrdiv.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -35,7 +32,6 @@ vfloat32m2_t test_vfrdiv_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
   return vfrdiv_vf_f32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrdiv.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -45,7 +41,6 @@ vfloat32m4_t test_vfrdiv_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
   return vfrdiv_vf_f32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrdiv.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -55,7 +50,6 @@ vfloat32m8_t test_vfrdiv_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
   return vfrdiv_vf_f32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrdiv.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -65,7 +59,6 @@ vfloat64m1_t test_vfrdiv_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
   return vfrdiv_vf_f64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrdiv.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -75,7 +68,6 @@ vfloat64m2_t test_vfrdiv_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
   return vfrdiv_vf_f64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrdiv.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -85,7 +77,6 @@ vfloat64m4_t test_vfrdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
   return vfrdiv_vf_f64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrdiv.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -95,7 +86,6 @@ vfloat64m8_t test_vfrdiv_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
   return vfrdiv_vf_f64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrdiv.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -106,7 +96,6 @@ vfloat32mf2_t test_vfrdiv_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
   return vfrdiv_vf_f32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrdiv.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -117,7 +106,6 @@ vfloat32m1_t test_vfrdiv_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
   return vfrdiv_vf_f32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrdiv.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -128,7 +116,6 @@ vfloat32m2_t test_vfrdiv_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
   return vfrdiv_vf_f32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrdiv.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -139,7 +126,6 @@ vfloat32m4_t test_vfrdiv_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
   return vfrdiv_vf_f32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrdiv.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -150,7 +136,6 @@ vfloat32m8_t test_vfrdiv_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
   return vfrdiv_vf_f32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrdiv.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -161,7 +146,6 @@ vfloat64m1_t test_vfrdiv_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
   return vfrdiv_vf_f64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrdiv.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -172,7 +156,6 @@ vfloat64m2_t test_vfrdiv_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
   return vfrdiv_vf_f64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrdiv.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -183,7 +166,6 @@ vfloat64m4_t test_vfrdiv_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
   return vfrdiv_vf_f64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrdiv.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrec7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrec7.c
index 04f36a7c81551..09db4584aef85 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrec7.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrec7.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfrec7_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vfloat32mf2_t test_vfrec7_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
   return vfrec7_v_f32mf2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrec7_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrec7.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -25,7 +23,6 @@ vfloat32m1_t test_vfrec7_v_f32m1(vfloat32m1_t op1, size_t vl) {
   return vfrec7_v_f32m1(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrec7_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrec7.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -35,7 +32,6 @@ vfloat32m2_t test_vfrec7_v_f32m2(vfloat32m2_t op1, size_t vl) {
   return vfrec7_v_f32m2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrec7_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrec7.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -45,7 +41,6 @@ vfloat32m4_t test_vfrec7_v_f32m4(vfloat32m4_t op1, size_t vl) {
   return vfrec7_v_f32m4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrec7_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrec7.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -55,7 +50,6 @@ vfloat32m8_t test_vfrec7_v_f32m8(vfloat32m8_t op1, size_t vl) {
   return vfrec7_v_f32m8(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrec7_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrec7.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -65,7 +59,6 @@ vfloat64m1_t test_vfrec7_v_f64m1(vfloat64m1_t op1, size_t vl) {
   return vfrec7_v_f64m1(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrec7_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrec7.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -75,7 +68,6 @@ vfloat64m2_t test_vfrec7_v_f64m2(vfloat64m2_t op1, size_t vl) {
   return vfrec7_v_f64m2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrec7_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrec7.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -85,7 +77,6 @@ vfloat64m4_t test_vfrec7_v_f64m4(vfloat64m4_t op1, size_t vl) {
   return vfrec7_v_f64m4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrec7_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrec7.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -95,7 +86,6 @@ vfloat64m8_t test_vfrec7_v_f64m8(vfloat64m8_t op1, size_t vl) {
   return vfrec7_v_f64m8(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrec7_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -106,7 +96,6 @@ vfloat32mf2_t test_vfrec7_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
   return vfrec7_v_f32mf2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrec7_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrec7.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -117,7 +106,6 @@ vfloat32m1_t test_vfrec7_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
   return vfrec7_v_f32m1_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrec7_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrec7.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -128,7 +116,6 @@ vfloat32m2_t test_vfrec7_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
   return vfrec7_v_f32m2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrec7_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrec7.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -139,7 +126,6 @@ vfloat32m4_t test_vfrec7_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
   return vfrec7_v_f32m4_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrec7_v_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrec7.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -150,7 +136,6 @@ vfloat32m8_t test_vfrec7_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
   return vfrec7_v_f32m8_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrec7_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrec7.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -161,7 +146,6 @@ vfloat64m1_t test_vfrec7_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
   return vfrec7_v_f64m1_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrec7_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrec7.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -172,7 +156,6 @@ vfloat64m2_t test_vfrec7_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
   return vfrec7_v_f64m2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrec7_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrec7.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -183,7 +166,6 @@ vfloat64m4_t test_vfrec7_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
   return vfrec7_v_f64m4_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrec7_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrec7.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmax.c
index 14138027bb92c..72f68e56123a3 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmax.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmax.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -17,7 +16,6 @@ vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1(vfloat32m1_t dst,
   return vfredmax_vs_f32mf2_f32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m1_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -28,7 +26,6 @@ vfloat32m1_t test_vfredmax_vs_f32m1_f32m1(vfloat32m1_t dst, vfloat32m1_t vector,
   return vfredmax_vs_f32m1_f32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m2_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -39,7 +36,6 @@ vfloat32m1_t test_vfredmax_vs_f32m2_f32m1(vfloat32m1_t dst, vfloat32m2_t vector,
   return vfredmax_vs_f32m2_f32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m4_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -50,7 +46,6 @@ vfloat32m1_t test_vfredmax_vs_f32m4_f32m1(vfloat32m1_t dst, vfloat32m4_t vector,
   return vfredmax_vs_f32m4_f32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m8_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -61,7 +56,6 @@ vfloat32m1_t test_vfredmax_vs_f32m8_f32m1(vfloat32m1_t dst, vfloat32m8_t vector,
   return vfredmax_vs_f32m8_f32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m1_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -72,7 +66,6 @@ vfloat64m1_t test_vfredmax_vs_f64m1_f64m1(vfloat64m1_t dst, vfloat64m1_t vector,
   return vfredmax_vs_f64m1_f64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m2_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -83,7 +76,6 @@ vfloat64m1_t test_vfredmax_vs_f64m2_f64m1(vfloat64m1_t dst, vfloat64m2_t vector,
   return vfredmax_vs_f64m2_f64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m4_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -94,7 +86,6 @@ vfloat64m1_t test_vfredmax_vs_f64m4_f64m1(vfloat64m1_t dst, vfloat64m4_t vector,
   return vfredmax_vs_f64m4_f64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m8_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -105,7 +96,6 @@ vfloat64m1_t test_vfredmax_vs_f64m8_f64m1(vfloat64m1_t dst, vfloat64m8_t vector,
   return vfredmax_vs_f64m8_f64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -117,7 +107,6 @@ vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst,
   return vfredmax_vs_f32mf2_f32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m1_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -129,7 +118,6 @@ vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
   return vfredmax_vs_f32m1_f32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m2_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -141,7 +129,6 @@ vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst,
   return vfredmax_vs_f32m2_f32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m4_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -153,7 +140,6 @@ vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst,
   return vfredmax_vs_f32m4_f32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m8_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -165,7 +151,6 @@ vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst,
   return vfredmax_vs_f32m8_f32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m1_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -177,7 +162,6 @@ vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
   return vfredmax_vs_f64m1_f64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m2_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -189,7 +173,6 @@ vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
   return vfredmax_vs_f64m2_f64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m4_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -201,7 +184,6 @@ vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
   return vfredmax_vs_f64m4_f64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m8_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmin.c
index 74000ce8b33ce..2f7808be8153e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmin.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmin.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -17,7 +16,6 @@ vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1(vfloat32m1_t dst,
   return vfredmin_vs_f32mf2_f32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m1_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -28,7 +26,6 @@ vfloat32m1_t test_vfredmin_vs_f32m1_f32m1(vfloat32m1_t dst, vfloat32m1_t vector,
   return vfredmin_vs_f32m1_f32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m2_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -39,7 +36,6 @@ vfloat32m1_t test_vfredmin_vs_f32m2_f32m1(vfloat32m1_t dst, vfloat32m2_t vector,
   return vfredmin_vs_f32m2_f32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m4_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -50,7 +46,6 @@ vfloat32m1_t test_vfredmin_vs_f32m4_f32m1(vfloat32m1_t dst, vfloat32m4_t vector,
   return vfredmin_vs_f32m4_f32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m8_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -61,7 +56,6 @@ vfloat32m1_t test_vfredmin_vs_f32m8_f32m1(vfloat32m1_t dst, vfloat32m8_t vector,
   return vfredmin_vs_f32m8_f32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m1_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -72,7 +66,6 @@ vfloat64m1_t test_vfredmin_vs_f64m1_f64m1(vfloat64m1_t dst, vfloat64m1_t vector,
   return vfredmin_vs_f64m1_f64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m2_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -83,7 +76,6 @@ vfloat64m1_t test_vfredmin_vs_f64m2_f64m1(vfloat64m1_t dst, vfloat64m2_t vector,
   return vfredmin_vs_f64m2_f64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m4_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -94,7 +86,6 @@ vfloat64m1_t test_vfredmin_vs_f64m4_f64m1(vfloat64m1_t dst, vfloat64m4_t vector,
   return vfredmin_vs_f64m4_f64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m8_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -105,7 +96,6 @@ vfloat64m1_t test_vfredmin_vs_f64m8_f64m1(vfloat64m1_t dst, vfloat64m8_t vector,
   return vfredmin_vs_f64m8_f64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -117,7 +107,6 @@ vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst,
   return vfredmin_vs_f32mf2_f32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m1_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -129,7 +118,6 @@ vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
   return vfredmin_vs_f32m1_f32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m2_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -141,7 +129,6 @@ vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst,
   return vfredmin_vs_f32m2_f32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m4_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -153,7 +140,6 @@ vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst,
   return vfredmin_vs_f32m4_f32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m8_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -165,7 +151,6 @@ vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst,
   return vfredmin_vs_f32m8_f32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m1_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -177,7 +162,6 @@ vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
   return vfredmin_vs_f64m1_f64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m2_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -189,7 +173,6 @@ vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
   return vfredmin_vs_f64m2_f64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m4_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -201,7 +184,6 @@ vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
   return vfredmin_vs_f64m4_f64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m8_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredsum.c
index a0edf9c38711d..a72ad90271993 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredsum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredsum.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfredsum_vs_f32mf2_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -17,7 +16,6 @@ vfloat32m1_t test_vfredsum_vs_f32mf2_f32m1(vfloat32m1_t dst,
   return vfredsum_vs_f32mf2_f32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredsum_vs_f32m1_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -28,7 +26,6 @@ vfloat32m1_t test_vfredsum_vs_f32m1_f32m1(vfloat32m1_t dst, vfloat32m1_t vector,
   return vfredsum_vs_f32m1_f32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredsum_vs_f32m2_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -39,7 +36,6 @@ vfloat32m1_t test_vfredsum_vs_f32m2_f32m1(vfloat32m1_t dst, vfloat32m2_t vector,
   return vfredsum_vs_f32m2_f32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredsum_vs_f32m4_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -50,7 +46,6 @@ vfloat32m1_t test_vfredsum_vs_f32m4_f32m1(vfloat32m1_t dst, vfloat32m4_t vector,
   return vfredsum_vs_f32m4_f32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredsum_vs_f32m8_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -61,7 +56,6 @@ vfloat32m1_t test_vfredsum_vs_f32m8_f32m1(vfloat32m1_t dst, vfloat32m8_t vector,
   return vfredsum_vs_f32m8_f32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredsum_vs_f64m1_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -72,7 +66,6 @@ vfloat64m1_t test_vfredsum_vs_f64m1_f64m1(vfloat64m1_t dst, vfloat64m1_t vector,
   return vfredsum_vs_f64m1_f64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredsum_vs_f64m2_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -83,7 +76,6 @@ vfloat64m1_t test_vfredsum_vs_f64m2_f64m1(vfloat64m1_t dst, vfloat64m2_t vector,
   return vfredsum_vs_f64m2_f64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredsum_vs_f64m4_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -94,7 +86,6 @@ vfloat64m1_t test_vfredsum_vs_f64m4_f64m1(vfloat64m1_t dst, vfloat64m4_t vector,
   return vfredsum_vs_f64m4_f64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredsum_vs_f64m8_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -105,7 +96,6 @@ vfloat64m1_t test_vfredsum_vs_f64m8_f64m1(vfloat64m1_t dst, vfloat64m8_t vector,
   return vfredsum_vs_f64m8_f64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredsum_vs_f32mf2_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -117,7 +107,6 @@ vfloat32m1_t test_vfredsum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst,
   return vfredsum_vs_f32mf2_f32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredsum_vs_f32m1_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -129,7 +118,6 @@ vfloat32m1_t test_vfredsum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
   return vfredsum_vs_f32m1_f32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredsum_vs_f32m2_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -141,7 +129,6 @@ vfloat32m1_t test_vfredsum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst,
   return vfredsum_vs_f32m2_f32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredsum_vs_f32m4_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -153,7 +140,6 @@ vfloat32m1_t test_vfredsum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst,
   return vfredsum_vs_f32m4_f32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredsum_vs_f32m8_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -165,7 +151,6 @@ vfloat32m1_t test_vfredsum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst,
   return vfredsum_vs_f32m8_f32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredsum_vs_f64m1_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -177,7 +162,6 @@ vfloat64m1_t test_vfredsum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
   return vfredsum_vs_f64m1_f64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredsum_vs_f64m2_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -189,7 +173,6 @@ vfloat64m1_t test_vfredsum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
   return vfredsum_vs_f64m2_f64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredsum_vs_f64m4_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -201,7 +184,6 @@ vfloat64m1_t test_vfredsum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
   return vfredsum_vs_f64m4_f64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredsum_vs_f64m8_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -213,7 +195,6 @@ vfloat64m1_t test_vfredsum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
   return vfredsum_vs_f64m8_f64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -225,7 +206,6 @@ vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1(vfloat32m1_t dst,
   return vfredosum_vs_f32mf2_f32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m1_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -237,7 +217,6 @@ vfloat32m1_t test_vfredosum_vs_f32m1_f32m1(vfloat32m1_t dst,
   return vfredosum_vs_f32m1_f32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m2_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -249,7 +228,6 @@ vfloat32m1_t test_vfredosum_vs_f32m2_f32m1(vfloat32m1_t dst,
   return vfredosum_vs_f32m2_f32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m4_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -261,7 +239,6 @@ vfloat32m1_t test_vfredosum_vs_f32m4_f32m1(vfloat32m1_t dst,
   return vfredosum_vs_f32m4_f32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m8_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -273,7 +250,6 @@ vfloat32m1_t test_vfredosum_vs_f32m8_f32m1(vfloat32m1_t dst,
   return vfredosum_vs_f32m8_f32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m1_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -285,7 +261,6 @@ vfloat64m1_t test_vfredosum_vs_f64m1_f64m1(vfloat64m1_t dst,
   return vfredosum_vs_f64m1_f64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m2_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -297,7 +272,6 @@ vfloat64m1_t test_vfredosum_vs_f64m2_f64m1(vfloat64m1_t dst,
   return vfredosum_vs_f64m2_f64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m4_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -309,7 +283,6 @@ vfloat64m1_t test_vfredosum_vs_f64m4_f64m1(vfloat64m1_t dst,
   return vfredosum_vs_f64m4_f64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m8_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -321,7 +294,6 @@ vfloat64m1_t test_vfredosum_vs_f64m8_f64m1(vfloat64m1_t dst,
   return vfredosum_vs_f64m8_f64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -333,7 +305,6 @@ vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst,
   return vfredosum_vs_f32mf2_f32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m1_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -345,7 +316,6 @@ vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
   return vfredosum_vs_f32m1_f32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m2_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -357,7 +327,6 @@ vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst,
   return vfredosum_vs_f32m2_f32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m4_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -369,7 +338,6 @@ vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst,
   return vfredosum_vs_f32m4_f32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m8_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -381,7 +349,6 @@ vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst,
   return vfredosum_vs_f32m8_f32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m1_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -393,7 +360,6 @@ vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
   return vfredosum_vs_f64m1_f64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m2_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -405,7 +371,6 @@ vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
   return vfredosum_vs_f64m2_f64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m4_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -417,7 +382,6 @@ vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
   return vfredosum_vs_f64m4_f64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m8_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsqrt7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsqrt7.c
index e3d2881125301..ef78897d2c972 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsqrt7.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsqrt7.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsqrt7.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vfloat32mf2_t test_vfrsqrt7_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
   return vfrsqrt7_v_f32mf2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrsqrt7.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -25,7 +23,6 @@ vfloat32m1_t test_vfrsqrt7_v_f32m1(vfloat32m1_t op1, size_t vl) {
   return vfrsqrt7_v_f32m1(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrsqrt7.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -35,7 +32,6 @@ vfloat32m2_t test_vfrsqrt7_v_f32m2(vfloat32m2_t op1, size_t vl) {
   return vfrsqrt7_v_f32m2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrsqrt7.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -45,7 +41,6 @@ vfloat32m4_t test_vfrsqrt7_v_f32m4(vfloat32m4_t op1, size_t vl) {
   return vfrsqrt7_v_f32m4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrsqrt7.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -55,7 +50,6 @@ vfloat32m8_t test_vfrsqrt7_v_f32m8(vfloat32m8_t op1, size_t vl) {
   return vfrsqrt7_v_f32m8(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrsqrt7.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -65,7 +59,6 @@ vfloat64m1_t test_vfrsqrt7_v_f64m1(vfloat64m1_t op1, size_t vl) {
   return vfrsqrt7_v_f64m1(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrsqrt7.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -75,7 +68,6 @@ vfloat64m2_t test_vfrsqrt7_v_f64m2(vfloat64m2_t op1, size_t vl) {
   return vfrsqrt7_v_f64m2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrsqrt7.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -85,7 +77,6 @@ vfloat64m4_t test_vfrsqrt7_v_f64m4(vfloat64m4_t op1, size_t vl) {
   return vfrsqrt7_v_f64m4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrsqrt7.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -95,7 +86,6 @@ vfloat64m8_t test_vfrsqrt7_v_f64m8(vfloat64m8_t op1, size_t vl) {
   return vfrsqrt7_v_f64m8(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsqrt7.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -106,7 +96,6 @@ vfloat32mf2_t test_vfrsqrt7_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
   return vfrsqrt7_v_f32mf2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrsqrt7.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -117,7 +106,6 @@ vfloat32m1_t test_vfrsqrt7_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
   return vfrsqrt7_v_f32m1_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrsqrt7.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -128,7 +116,6 @@ vfloat32m2_t test_vfrsqrt7_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
   return vfrsqrt7_v_f32m2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrsqrt7.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -139,7 +126,6 @@ vfloat32m4_t test_vfrsqrt7_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
   return vfrsqrt7_v_f32m4_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrsqrt7.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -150,7 +136,6 @@ vfloat32m8_t test_vfrsqrt7_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
   return vfrsqrt7_v_f32m8_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrsqrt7.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -161,7 +146,6 @@ vfloat64m1_t test_vfrsqrt7_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
   return vfrsqrt7_v_f64m1_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrsqrt7.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -172,7 +156,6 @@ vfloat64m2_t test_vfrsqrt7_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
   return vfrsqrt7_v_f64m2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrsqrt7.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -183,7 +166,6 @@ vfloat64m4_t test_vfrsqrt7_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
   return vfrsqrt7_v_f64m4_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrsqrt7.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsub.c
index 9c2a2dfb21747..832198e27001f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsub.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsub.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vfloat32mf2_t test_vfrsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
   return vfrsub_vf_f32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrsub.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -25,7 +23,6 @@ vfloat32m1_t test_vfrsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
   return vfrsub_vf_f32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrsub.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -35,7 +32,6 @@ vfloat32m2_t test_vfrsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
   return vfrsub_vf_f32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrsub.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -45,7 +41,6 @@ vfloat32m4_t test_vfrsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
   return vfrsub_vf_f32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrsub.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -55,7 +50,6 @@ vfloat32m8_t test_vfrsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
   return vfrsub_vf_f32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrsub.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -65,7 +59,6 @@ vfloat64m1_t test_vfrsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
   return vfrsub_vf_f64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrsub.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -75,7 +68,6 @@ vfloat64m2_t test_vfrsub_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
   return vfrsub_vf_f64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrsub.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -85,7 +77,6 @@ vfloat64m4_t test_vfrsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
   return vfrsub_vf_f64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrsub.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -95,7 +86,6 @@ vfloat64m8_t test_vfrsub_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
   return vfrsub_vf_f64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsub.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -106,7 +96,6 @@ vfloat32mf2_t test_vfrsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
   return vfrsub_vf_f32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrsub.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -117,7 +106,6 @@ vfloat32m1_t test_vfrsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
   return vfrsub_vf_f32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrsub.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -128,7 +116,6 @@ vfloat32m2_t test_vfrsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
   return vfrsub_vf_f32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrsub.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -139,7 +126,6 @@ vfloat32m4_t test_vfrsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
   return vfrsub_vf_f32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrsub.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -150,7 +136,6 @@ vfloat32m8_t test_vfrsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
   return vfrsub_vf_f32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrsub.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -161,7 +146,6 @@ vfloat64m1_t test_vfrsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
   return vfrsub_vf_f64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrsub.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -172,7 +156,6 @@ vfloat64m2_t test_vfrsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
   return vfrsub_vf_f64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrsub.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -183,7 +166,6 @@ vfloat64m4_t test_vfrsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
   return vfrsub_vf_f64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrsub.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsgnj.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsgnj.c
index bdf85243c0e93..eaa7cbbd950b3 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsgnj.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsgnj.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vfloat32mf2_t test_vfsgnj_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
   return vfsgnj_vv_f32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -26,7 +24,6 @@ vfloat32mf2_t test_vfsgnj_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
   return vfsgnj_vf_f32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -37,7 +34,6 @@ vfloat32m1_t test_vfsgnj_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
   return vfsgnj_vv_f32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -47,7 +43,6 @@ vfloat32m1_t test_vfsgnj_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
   return vfsgnj_vf_f32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -58,7 +53,6 @@ vfloat32m2_t test_vfsgnj_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
   return vfsgnj_vv_f32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -68,7 +62,6 @@ vfloat32m2_t test_vfsgnj_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
   return vfsgnj_vf_f32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -79,7 +72,6 @@ vfloat32m4_t test_vfsgnj_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
   return vfsgnj_vv_f32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -89,7 +81,6 @@ vfloat32m4_t test_vfsgnj_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
   return vfsgnj_vf_f32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -100,7 +91,6 @@ vfloat32m8_t test_vfsgnj_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
   return vfsgnj_vv_f32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -110,7 +100,6 @@ vfloat32m8_t test_vfsgnj_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
   return vfsgnj_vf_f32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnj.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -121,7 +110,6 @@ vfloat64m1_t test_vfsgnj_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
   return vfsgnj_vv_f64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnj.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -131,7 +119,6 @@ vfloat64m1_t test_vfsgnj_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
   return vfsgnj_vf_f64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnj.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -142,7 +129,6 @@ vfloat64m2_t test_vfsgnj_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
   return vfsgnj_vv_f64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnj.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -152,7 +138,6 @@ vfloat64m2_t test_vfsgnj_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
   return vfsgnj_vf_f64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -163,7 +148,6 @@ vfloat64m4_t test_vfsgnj_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
   return vfsgnj_vv_f64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -173,7 +157,6 @@ vfloat64m4_t test_vfsgnj_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
   return vfsgnj_vf_f64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -184,7 +167,6 @@ vfloat64m8_t test_vfsgnj_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
   return vfsgnj_vv_f64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -194,7 +176,6 @@ vfloat64m8_t test_vfsgnj_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
   return vfsgnj_vf_f64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -205,7 +186,6 @@ vfloat32mf2_t test_vfsgnjn_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
   return vfsgnjn_vv_f32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -215,7 +195,6 @@ vfloat32mf2_t test_vfsgnjn_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
   return vfsgnjn_vf_f32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -226,7 +205,6 @@ vfloat32m1_t test_vfsgnjn_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
   return vfsgnjn_vv_f32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -236,7 +214,6 @@ vfloat32m1_t test_vfsgnjn_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
   return vfsgnjn_vf_f32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -247,7 +224,6 @@ vfloat32m2_t test_vfsgnjn_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
   return vfsgnjn_vv_f32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -257,7 +233,6 @@ vfloat32m2_t test_vfsgnjn_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
   return vfsgnjn_vf_f32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -268,7 +243,6 @@ vfloat32m4_t test_vfsgnjn_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
   return vfsgnjn_vv_f32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -278,7 +252,6 @@ vfloat32m4_t test_vfsgnjn_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
   return vfsgnjn_vf_f32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -289,7 +262,6 @@ vfloat32m8_t test_vfsgnjn_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
   return vfsgnjn_vv_f32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -299,7 +271,6 @@ vfloat32m8_t test_vfsgnjn_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
   return vfsgnjn_vf_f32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -310,7 +281,6 @@ vfloat64m1_t test_vfsgnjn_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
   return vfsgnjn_vv_f64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -320,7 +290,6 @@ vfloat64m1_t test_vfsgnjn_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
   return vfsgnjn_vf_f64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -331,7 +300,6 @@ vfloat64m2_t test_vfsgnjn_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
   return vfsgnjn_vv_f64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -341,7 +309,6 @@ vfloat64m2_t test_vfsgnjn_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
   return vfsgnjn_vf_f64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -352,7 +319,6 @@ vfloat64m4_t test_vfsgnjn_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
   return vfsgnjn_vv_f64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -362,7 +328,6 @@ vfloat64m4_t test_vfsgnjn_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
   return vfsgnjn_vf_f64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -373,7 +338,6 @@ vfloat64m8_t test_vfsgnjn_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
   return vfsgnjn_vv_f64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -383,7 +347,6 @@ vfloat64m8_t test_vfsgnjn_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
   return vfsgnjn_vf_f64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -394,7 +357,6 @@ vfloat32mf2_t test_vfsgnjx_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
   return vfsgnjx_vv_f32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -404,7 +366,6 @@ vfloat32mf2_t test_vfsgnjx_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
   return vfsgnjx_vf_f32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -415,7 +376,6 @@ vfloat32m1_t test_vfsgnjx_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
   return vfsgnjx_vv_f32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -425,7 +385,6 @@ vfloat32m1_t test_vfsgnjx_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
   return vfsgnjx_vf_f32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -436,7 +395,6 @@ vfloat32m2_t test_vfsgnjx_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
   return vfsgnjx_vv_f32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -446,7 +404,6 @@ vfloat32m2_t test_vfsgnjx_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
   return vfsgnjx_vf_f32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -457,7 +414,6 @@ vfloat32m4_t test_vfsgnjx_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
   return vfsgnjx_vv_f32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -467,7 +423,6 @@ vfloat32m4_t test_vfsgnjx_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
   return vfsgnjx_vf_f32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -478,7 +433,6 @@ vfloat32m8_t test_vfsgnjx_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
   return vfsgnjx_vv_f32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -488,7 +442,6 @@ vfloat32m8_t test_vfsgnjx_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
   return vfsgnjx_vf_f32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -499,7 +452,6 @@ vfloat64m1_t test_vfsgnjx_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
   return vfsgnjx_vv_f64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -509,7 +461,6 @@ vfloat64m1_t test_vfsgnjx_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
   return vfsgnjx_vf_f64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -520,7 +471,6 @@ vfloat64m2_t test_vfsgnjx_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
   return vfsgnjx_vv_f64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -530,7 +480,6 @@ vfloat64m2_t test_vfsgnjx_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
   return vfsgnjx_vf_f64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -541,7 +490,6 @@ vfloat64m4_t test_vfsgnjx_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
   return vfsgnjx_vv_f64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -551,7 +499,6 @@ vfloat64m4_t test_vfsgnjx_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
   return vfsgnjx_vf_f64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -562,7 +509,6 @@ vfloat64m8_t test_vfsgnjx_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
   return vfsgnjx_vv_f64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -572,7 +518,6 @@ vfloat64m8_t test_vfsgnjx_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
   return vfsgnjx_vf_f64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnj.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -584,7 +529,6 @@ vfloat32mf2_t test_vfsgnj_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
   return vfsgnj_vv_f32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnj.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -595,7 +539,6 @@ vfloat32mf2_t test_vfsgnj_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
   return vfsgnj_vf_f32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnj.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -607,7 +550,6 @@ vfloat32m1_t test_vfsgnj_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
   return vfsgnj_vv_f32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnj.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -618,7 +560,6 @@ vfloat32m1_t test_vfsgnj_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
   return vfsgnj_vf_f32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnj.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -630,7 +571,6 @@ vfloat32m2_t test_vfsgnj_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
   return vfsgnj_vv_f32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnj.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -641,7 +581,6 @@ vfloat32m2_t test_vfsgnj_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
   return vfsgnj_vf_f32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnj.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -653,7 +592,6 @@ vfloat32m4_t test_vfsgnj_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
   return vfsgnj_vv_f32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnj.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -664,7 +602,6 @@ vfloat32m4_t test_vfsgnj_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
   return vfsgnj_vf_f32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnj.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -676,7 +613,6 @@ vfloat32m8_t test_vfsgnj_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
   return vfsgnj_vv_f32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnj.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -687,7 +623,6 @@ vfloat32m8_t test_vfsgnj_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
   return vfsgnj_vf_f32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnj.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -699,7 +634,6 @@ vfloat64m1_t test_vfsgnj_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
   return vfsgnj_vv_f64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnj.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -710,7 +644,6 @@ vfloat64m1_t test_vfsgnj_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
   return vfsgnj_vf_f64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnj.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -722,7 +655,6 @@ vfloat64m2_t test_vfsgnj_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
   return vfsgnj_vv_f64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnj.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -733,7 +665,6 @@ vfloat64m2_t test_vfsgnj_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
   return vfsgnj_vf_f64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnj.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -745,7 +676,6 @@ vfloat64m4_t test_vfsgnj_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
   return vfsgnj_vv_f64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnj.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -756,7 +686,6 @@ vfloat64m4_t test_vfsgnj_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
   return vfsgnj_vf_f64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnj.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -768,7 +697,6 @@ vfloat64m8_t test_vfsgnj_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
   return vfsgnj_vv_f64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnj.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -779,7 +707,6 @@ vfloat64m8_t test_vfsgnj_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
   return vfsgnj_vf_f64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -791,7 +718,6 @@ vfloat32mf2_t test_vfsgnjn_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
   return vfsgnjn_vv_f32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -803,7 +729,6 @@ vfloat32mf2_t test_vfsgnjn_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
   return vfsgnjn_vf_f32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -815,7 +740,6 @@ vfloat32m1_t test_vfsgnjn_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
   return vfsgnjn_vv_f32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -826,7 +750,6 @@ vfloat32m1_t test_vfsgnjn_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
   return vfsgnjn_vf_f32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -838,7 +761,6 @@ vfloat32m2_t test_vfsgnjn_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
   return vfsgnjn_vv_f32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -849,7 +771,6 @@ vfloat32m2_t test_vfsgnjn_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
   return vfsgnjn_vf_f32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -861,7 +782,6 @@ vfloat32m4_t test_vfsgnjn_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
   return vfsgnjn_vv_f32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -872,7 +792,6 @@ vfloat32m4_t test_vfsgnjn_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
   return vfsgnjn_vf_f32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -884,7 +803,6 @@ vfloat32m8_t test_vfsgnjn_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
   return vfsgnjn_vv_f32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -895,7 +813,6 @@ vfloat32m8_t test_vfsgnjn_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
   return vfsgnjn_vf_f32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -907,7 +824,6 @@ vfloat64m1_t test_vfsgnjn_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
   return vfsgnjn_vv_f64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -918,7 +834,6 @@ vfloat64m1_t test_vfsgnjn_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
   return vfsgnjn_vf_f64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -930,7 +845,6 @@ vfloat64m2_t test_vfsgnjn_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
   return vfsgnjn_vv_f64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -941,7 +855,6 @@ vfloat64m2_t test_vfsgnjn_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
   return vfsgnjn_vf_f64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -953,7 +866,6 @@ vfloat64m4_t test_vfsgnjn_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
   return vfsgnjn_vv_f64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -964,7 +876,6 @@ vfloat64m4_t test_vfsgnjn_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
   return vfsgnjn_vf_f64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -976,7 +887,6 @@ vfloat64m8_t test_vfsgnjn_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
   return vfsgnjn_vv_f64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -987,7 +897,6 @@ vfloat64m8_t test_vfsgnjn_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
   return vfsgnjn_vf_f64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -999,7 +908,6 @@ vfloat32mf2_t test_vfsgnjx_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
   return vfsgnjx_vv_f32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1011,7 +919,6 @@ vfloat32mf2_t test_vfsgnjx_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
   return vfsgnjx_vf_f32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1023,7 +930,6 @@ vfloat32m1_t test_vfsgnjx_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
   return vfsgnjx_vv_f32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1034,7 +940,6 @@ vfloat32m1_t test_vfsgnjx_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
   return vfsgnjx_vf_f32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1046,7 +951,6 @@ vfloat32m2_t test_vfsgnjx_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
   return vfsgnjx_vv_f32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1057,7 +961,6 @@ vfloat32m2_t test_vfsgnjx_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
   return vfsgnjx_vf_f32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1069,7 +972,6 @@ vfloat32m4_t test_vfsgnjx_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
   return vfsgnjx_vv_f32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1080,7 +982,6 @@ vfloat32m4_t test_vfsgnjx_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
   return vfsgnjx_vf_f32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1092,7 +993,6 @@ vfloat32m8_t test_vfsgnjx_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
   return vfsgnjx_vv_f32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1103,7 +1003,6 @@ vfloat32m8_t test_vfsgnjx_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
   return vfsgnjx_vf_f32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1115,7 +1014,6 @@ vfloat64m1_t test_vfsgnjx_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
   return vfsgnjx_vv_f64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1126,7 +1024,6 @@ vfloat64m1_t test_vfsgnjx_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
   return vfsgnjx_vf_f64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1138,7 +1035,6 @@ vfloat64m2_t test_vfsgnjx_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
   return vfsgnjx_vv_f64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1149,7 +1045,6 @@ vfloat64m2_t test_vfsgnjx_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
   return vfsgnjx_vf_f64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1161,7 +1056,6 @@ vfloat64m4_t test_vfsgnjx_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
   return vfsgnjx_vv_f64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1172,7 +1066,6 @@ vfloat64m4_t test_vfsgnjx_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
   return vfsgnjx_vf_f64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1184,7 +1077,6 @@ vfloat64m8_t test_vfsgnjx_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
   return vfsgnjx_vv_f64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1down.c
index 0238e30bed3dc..83e2b1812a2e5 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1down.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1down.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1down.nxv1f32.f32.i64(<vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vfloat32mf2_t test_vfslide1down_vf_f32mf2(vfloat32mf2_t src, float value,
   return vfslide1down_vf_f32mf2(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1down.nxv2f32.f32.i64(<vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -27,7 +25,6 @@ vfloat32m1_t test_vfslide1down_vf_f32m1(vfloat32m1_t src, float value,
   return vfslide1down_vf_f32m1(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1down.nxv4f32.f32.i64(<vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -38,7 +35,6 @@ vfloat32m2_t test_vfslide1down_vf_f32m2(vfloat32m2_t src, float value,
   return vfslide1down_vf_f32m2(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1down.nxv8f32.f32.i64(<vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -49,7 +45,6 @@ vfloat32m4_t test_vfslide1down_vf_f32m4(vfloat32m4_t src, float value,
   return vfslide1down_vf_f32m4(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1down.nxv16f32.f32.i64(<vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -60,7 +55,6 @@ vfloat32m8_t test_vfslide1down_vf_f32m8(vfloat32m8_t src, float value,
   return vfslide1down_vf_f32m8(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1down.nxv1f64.f64.i64(<vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -71,7 +65,6 @@ vfloat64m1_t test_vfslide1down_vf_f64m1(vfloat64m1_t src, double value,
   return vfslide1down_vf_f64m1(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1down.nxv2f64.f64.i64(<vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -82,7 +75,6 @@ vfloat64m2_t test_vfslide1down_vf_f64m2(vfloat64m2_t src, double value,
   return vfslide1down_vf_f64m2(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1down.nxv4f64.f64.i64(<vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -93,7 +85,6 @@ vfloat64m4_t test_vfslide1down_vf_f64m4(vfloat64m4_t src, double value,
   return vfslide1down_vf_f64m4(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1down.nxv8f64.f64.i64(<vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -104,7 +95,6 @@ vfloat64m8_t test_vfslide1down_vf_f64m8(vfloat64m8_t src, double value,
   return vfslide1down_vf_f64m8(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1down.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -117,7 +107,6 @@ vfloat32mf2_t test_vfslide1down_vf_f32mf2_m(vbool64_t mask,
   return vfslide1down_vf_f32mf2_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1down.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -130,7 +119,6 @@ vfloat32m1_t test_vfslide1down_vf_f32m1_m(vbool32_t mask,
   return vfslide1down_vf_f32m1_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1down.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -143,7 +131,6 @@ vfloat32m2_t test_vfslide1down_vf_f32m2_m(vbool16_t mask,
   return vfslide1down_vf_f32m2_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1down.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -155,7 +142,6 @@ vfloat32m4_t test_vfslide1down_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
   return vfslide1down_vf_f32m4_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1down.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -167,7 +153,6 @@ vfloat32m8_t test_vfslide1down_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
   return vfslide1down_vf_f32m8_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1down.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -180,7 +165,6 @@ vfloat64m1_t test_vfslide1down_vf_f64m1_m(vbool64_t mask,
   return vfslide1down_vf_f64m1_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1down.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -193,7 +177,6 @@ vfloat64m2_t test_vfslide1down_vf_f64m2_m(vbool32_t mask,
   return vfslide1down_vf_f64m2_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1down.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -206,7 +189,6 @@ vfloat64m4_t test_vfslide1down_vf_f64m4_m(vbool16_t mask,
   return vfslide1down_vf_f64m4_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1down.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1up.c
index d60439d1988fa..771c08646a030 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1up.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1up.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1up.nxv1f32.f32.i64(<vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vfloat32mf2_t test_vfslide1up_vf_f32mf2(vfloat32mf2_t src, float value,
   return vfslide1up_vf_f32mf2(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1up.nxv2f32.f32.i64(<vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -27,7 +25,6 @@ vfloat32m1_t test_vfslide1up_vf_f32m1(vfloat32m1_t src, float value,
   return vfslide1up_vf_f32m1(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1up.nxv4f32.f32.i64(<vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -38,7 +35,6 @@ vfloat32m2_t test_vfslide1up_vf_f32m2(vfloat32m2_t src, float value,
   return vfslide1up_vf_f32m2(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1up.nxv8f32.f32.i64(<vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -49,7 +45,6 @@ vfloat32m4_t test_vfslide1up_vf_f32m4(vfloat32m4_t src, float value,
   return vfslide1up_vf_f32m4(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1up.nxv16f32.f32.i64(<vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -60,7 +55,6 @@ vfloat32m8_t test_vfslide1up_vf_f32m8(vfloat32m8_t src, float value,
   return vfslide1up_vf_f32m8(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1up.nxv1f64.f64.i64(<vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -71,7 +65,6 @@ vfloat64m1_t test_vfslide1up_vf_f64m1(vfloat64m1_t src, double value,
   return vfslide1up_vf_f64m1(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1up.nxv2f64.f64.i64(<vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -82,7 +75,6 @@ vfloat64m2_t test_vfslide1up_vf_f64m2(vfloat64m2_t src, double value,
   return vfslide1up_vf_f64m2(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1up.nxv4f64.f64.i64(<vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -93,7 +85,6 @@ vfloat64m4_t test_vfslide1up_vf_f64m4(vfloat64m4_t src, double value,
   return vfslide1up_vf_f64m4(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1up.nxv8f64.f64.i64(<vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -104,7 +95,6 @@ vfloat64m8_t test_vfslide1up_vf_f64m8(vfloat64m8_t src, double value,
   return vfslide1up_vf_f64m8(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1up.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -117,7 +107,6 @@ vfloat32mf2_t test_vfslide1up_vf_f32mf2_m(vbool64_t mask,
   return vfslide1up_vf_f32mf2_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1up.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -129,7 +118,6 @@ vfloat32m1_t test_vfslide1up_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
   return vfslide1up_vf_f32m1_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1up.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -141,7 +129,6 @@ vfloat32m2_t test_vfslide1up_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
   return vfslide1up_vf_f32m2_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1up.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -153,7 +140,6 @@ vfloat32m4_t test_vfslide1up_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
   return vfslide1up_vf_f32m4_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1up.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -165,7 +151,6 @@ vfloat32m8_t test_vfslide1up_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
   return vfslide1up_vf_f32m8_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1up.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -177,7 +162,6 @@ vfloat64m1_t test_vfslide1up_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
   return vfslide1up_vf_f64m1_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1up.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -189,7 +173,6 @@ vfloat64m2_t test_vfslide1up_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
   return vfslide1up_vf_f64m2_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1up.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -201,7 +184,6 @@ vfloat64m4_t test_vfslide1up_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
   return vfslide1up_vf_f64m4_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1up.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsqrt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsqrt.c
index 027135381143f..7d2a0b3366274 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsqrt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsqrt.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vfloat32mf2_t test_vfsqrt_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
   return vfsqrt_v_f32mf2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -25,7 +23,6 @@ vfloat32m1_t test_vfsqrt_v_f32m1(vfloat32m1_t op1, size_t vl) {
   return vfsqrt_v_f32m1(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -35,7 +32,6 @@ vfloat32m2_t test_vfsqrt_v_f32m2(vfloat32m2_t op1, size_t vl) {
   return vfsqrt_v_f32m2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -45,7 +41,6 @@ vfloat32m4_t test_vfsqrt_v_f32m4(vfloat32m4_t op1, size_t vl) {
   return vfsqrt_v_f32m4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -55,7 +50,6 @@ vfloat32m8_t test_vfsqrt_v_f32m8(vfloat32m8_t op1, size_t vl) {
   return vfsqrt_v_f32m8(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -65,7 +59,6 @@ vfloat64m1_t test_vfsqrt_v_f64m1(vfloat64m1_t op1, size_t vl) {
   return vfsqrt_v_f64m1(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -75,7 +68,6 @@ vfloat64m2_t test_vfsqrt_v_f64m2(vfloat64m2_t op1, size_t vl) {
   return vfsqrt_v_f64m2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -85,7 +77,6 @@ vfloat64m4_t test_vfsqrt_v_f64m4(vfloat64m4_t op1, size_t vl) {
   return vfsqrt_v_f64m4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -95,7 +86,6 @@ vfloat64m8_t test_vfsqrt_v_f64m8(vfloat64m8_t op1, size_t vl) {
   return vfsqrt_v_f64m8(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -106,7 +96,6 @@ vfloat32mf2_t test_vfsqrt_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
   return vfsqrt_v_f32mf2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -117,7 +106,6 @@ vfloat32m1_t test_vfsqrt_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
   return vfsqrt_v_f32m1_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -128,7 +116,6 @@ vfloat32m2_t test_vfsqrt_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
   return vfsqrt_v_f32m2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -139,7 +126,6 @@ vfloat32m4_t test_vfsqrt_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
   return vfsqrt_v_f32m4_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -150,7 +136,6 @@ vfloat32m8_t test_vfsqrt_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
   return vfsqrt_v_f32m8_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -161,7 +146,6 @@ vfloat64m1_t test_vfsqrt_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
   return vfsqrt_v_f64m1_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -172,7 +156,6 @@ vfloat64m2_t test_vfsqrt_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
   return vfsqrt_v_f64m2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -183,7 +166,6 @@ vfloat64m4_t test_vfsqrt_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
   return vfsqrt_v_f64m4_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsub.c
index c32c2459c45d1..dee966722bc3d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsub.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vfloat32mf2_t test_vfsub_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
   return vfsub_vv_f32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsub_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -26,7 +24,6 @@ vfloat32mf2_t test_vfsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
   return vfsub_vf_f32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsub_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -37,7 +34,6 @@ vfloat32m1_t test_vfsub_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
   return vfsub_vv_f32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsub_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -47,7 +43,6 @@ vfloat32m1_t test_vfsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
   return vfsub_vf_f32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsub_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -58,7 +53,6 @@ vfloat32m2_t test_vfsub_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
   return vfsub_vv_f32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsub_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -68,7 +62,6 @@ vfloat32m2_t test_vfsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
   return vfsub_vf_f32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsub_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -79,7 +72,6 @@ vfloat32m4_t test_vfsub_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
   return vfsub_vv_f32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsub_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -89,7 +81,6 @@ vfloat32m4_t test_vfsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
   return vfsub_vf_f32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsub_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -100,7 +91,6 @@ vfloat32m8_t test_vfsub_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
   return vfsub_vv_f32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsub_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -110,7 +100,6 @@ vfloat32m8_t test_vfsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
   return vfsub_vf_f32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsub_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -121,7 +110,6 @@ vfloat64m1_t test_vfsub_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
   return vfsub_vv_f64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsub_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -131,7 +119,6 @@ vfloat64m1_t test_vfsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
   return vfsub_vf_f64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsub_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -142,7 +129,6 @@ vfloat64m2_t test_vfsub_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
   return vfsub_vv_f64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsub_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -152,7 +138,6 @@ vfloat64m2_t test_vfsub_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
   return vfsub_vf_f64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsub_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -163,7 +148,6 @@ vfloat64m4_t test_vfsub_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
   return vfsub_vv_f64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsub_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -173,7 +157,6 @@ vfloat64m4_t test_vfsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
   return vfsub_vf_f64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsub_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -184,7 +167,6 @@ vfloat64m8_t test_vfsub_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
   return vfsub_vv_f64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsub_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -194,7 +176,6 @@ vfloat64m8_t test_vfsub_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
   return vfsub_vf_f64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -206,7 +187,6 @@ vfloat32mf2_t test_vfsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
   return vfsub_vv_f32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsub_vf_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -217,7 +197,6 @@ vfloat32mf2_t test_vfsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
   return vfsub_vf_f32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsub_vv_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -229,7 +208,6 @@ vfloat32m1_t test_vfsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
   return vfsub_vv_f32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsub_vf_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -240,7 +218,6 @@ vfloat32m1_t test_vfsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
   return vfsub_vf_f32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsub_vv_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -252,7 +229,6 @@ vfloat32m2_t test_vfsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
   return vfsub_vv_f32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsub_vf_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -263,7 +239,6 @@ vfloat32m2_t test_vfsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
   return vfsub_vf_f32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsub_vv_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -275,7 +250,6 @@ vfloat32m4_t test_vfsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
   return vfsub_vv_f32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsub_vf_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -286,7 +260,6 @@ vfloat32m4_t test_vfsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
   return vfsub_vf_f32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsub_vv_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -298,7 +271,6 @@ vfloat32m8_t test_vfsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
   return vfsub_vv_f32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsub_vf_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -309,7 +281,6 @@ vfloat32m8_t test_vfsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
   return vfsub_vf_f32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsub_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -321,7 +292,6 @@ vfloat64m1_t test_vfsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
   return vfsub_vv_f64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsub_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -332,7 +302,6 @@ vfloat64m1_t test_vfsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
   return vfsub_vf_f64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsub_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -344,7 +313,6 @@ vfloat64m2_t test_vfsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
   return vfsub_vv_f64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsub_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -355,7 +323,6 @@ vfloat64m2_t test_vfsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
   return vfsub_vf_f64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsub_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -367,7 +334,6 @@ vfloat64m4_t test_vfsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
   return vfsub_vv_f64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsub_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -378,7 +344,6 @@ vfloat64m4_t test_vfsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
   return vfsub_vf_f64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsub_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -390,7 +355,6 @@ vfloat64m8_t test_vfsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
   return vfsub_vv_f64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsub_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwadd.c
index 381239416aea9..a5f47d63765cb 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwadd.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vfloat64m1_t test_vfwadd_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2,
   return vfwadd_vv_f64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -26,7 +24,6 @@ vfloat64m1_t test_vfwadd_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) {
   return vfwadd_vf_f64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -37,7 +34,6 @@ vfloat64m1_t test_vfwadd_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2,
   return vfwadd_wv_f64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.f32.i64(<vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -47,7 +43,6 @@ vfloat64m1_t test_vfwadd_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) {
   return vfwadd_wf_f64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -58,7 +53,6 @@ vfloat64m2_t test_vfwadd_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2,
   return vfwadd_vv_f64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -68,7 +62,6 @@ vfloat64m2_t test_vfwadd_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) {
   return vfwadd_vf_f64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -79,7 +72,6 @@ vfloat64m2_t test_vfwadd_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2,
   return vfwadd_wv_f64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.f32.i64(<vscale x 2 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -89,7 +81,6 @@ vfloat64m2_t test_vfwadd_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) {
   return vfwadd_wf_f64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -100,7 +91,6 @@ vfloat64m4_t test_vfwadd_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2,
   return vfwadd_vv_f64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -110,7 +100,6 @@ vfloat64m4_t test_vfwadd_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) {
   return vfwadd_vf_f64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -121,7 +110,6 @@ vfloat64m4_t test_vfwadd_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2,
   return vfwadd_wv_f64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.f32.i64(<vscale x 4 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -131,7 +119,6 @@ vfloat64m4_t test_vfwadd_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) {
   return vfwadd_wf_f64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -142,7 +129,6 @@ vfloat64m8_t test_vfwadd_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2,
   return vfwadd_vv_f64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -152,7 +138,6 @@ vfloat64m8_t test_vfwadd_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) {
   return vfwadd_vf_f64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -163,7 +148,6 @@ vfloat64m8_t test_vfwadd_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2,
   return vfwadd_wv_f64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.f32.i64(<vscale x 8 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -173,7 +157,6 @@ vfloat64m8_t test_vfwadd_wf_f64m8(vfloat64m8_t op1, float op2, size_t vl) {
   return vfwadd_wf_f64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -185,7 +168,6 @@ vfloat64m1_t test_vfwadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
   return vfwadd_vv_f64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -196,7 +178,6 @@ vfloat64m1_t test_vfwadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
   return vfwadd_vf_f64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -208,7 +189,6 @@ vfloat64m1_t test_vfwadd_wv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
   return vfwadd_wv_f64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f64.f32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -219,7 +199,6 @@ vfloat64m1_t test_vfwadd_wf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
   return vfwadd_wf_f64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -231,7 +210,6 @@ vfloat64m2_t test_vfwadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
   return vfwadd_vv_f64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.f32.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -242,7 +220,6 @@ vfloat64m2_t test_vfwadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
   return vfwadd_vf_f64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.mask.nxv2f64.nxv2f32.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -254,7 +231,6 @@ vfloat64m2_t test_vfwadd_wv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
   return vfwadd_wv_f64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.mask.nxv2f64.f32.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -265,7 +241,6 @@ vfloat64m2_t test_vfwadd_wf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
   return vfwadd_wf_f64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -277,7 +252,6 @@ vfloat64m4_t test_vfwadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
   return vfwadd_vv_f64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.f32.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -288,7 +262,6 @@ vfloat64m4_t test_vfwadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
   return vfwadd_vf_f64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.mask.nxv4f64.nxv4f32.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -300,7 +273,6 @@ vfloat64m4_t test_vfwadd_wv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
   return vfwadd_wv_f64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.mask.nxv4f64.f32.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -311,7 +283,6 @@ vfloat64m4_t test_vfwadd_wf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
   return vfwadd_wf_f64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -323,7 +294,6 @@ vfloat64m8_t test_vfwadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
   return vfwadd_vv_f64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.f32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -334,7 +304,6 @@ vfloat64m8_t test_vfwadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
   return vfwadd_vf_f64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.mask.nxv8f64.nxv8f32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -346,7 +315,6 @@ vfloat64m8_t test_vfwadd_wv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
   return vfwadd_wv_f64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.mask.nxv8f64.f32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwcvt.c
index 46bb0de95fde9..06a1703da9c13 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwcvt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwcvt.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2(vint16mf4_t src, size_t vl) {
   return vfwcvt_f_x_v_f32mf2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.x.v.nxv2f32.nxv2i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -25,7 +23,6 @@ vfloat32m1_t test_vfwcvt_f_x_v_f32m1(vint16mf2_t src, size_t vl) {
   return vfwcvt_f_x_v_f32m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.x.v.nxv4f32.nxv4i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -35,7 +32,6 @@ vfloat32m2_t test_vfwcvt_f_x_v_f32m2(vint16m1_t src, size_t vl) {
   return vfwcvt_f_x_v_f32m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.x.v.nxv8f32.nxv8i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -45,7 +41,6 @@ vfloat32m4_t test_vfwcvt_f_x_v_f32m4(vint16m2_t src, size_t vl) {
   return vfwcvt_f_x_v_f32m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.x.v.nxv16f32.nxv16i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -55,7 +50,6 @@ vfloat32m8_t test_vfwcvt_f_x_v_f32m8(vint16m4_t src, size_t vl) {
   return vfwcvt_f_x_v_f32m8(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv1f32.nxv1i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -65,7 +59,6 @@ vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2(vuint16mf4_t src, size_t vl) {
   return vfwcvt_f_xu_v_f32mf2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv2f32.nxv2i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -75,7 +68,6 @@ vfloat32m1_t test_vfwcvt_f_xu_v_f32m1(vuint16mf2_t src, size_t vl) {
   return vfwcvt_f_xu_v_f32m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv4f32.nxv4i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -85,7 +77,6 @@ vfloat32m2_t test_vfwcvt_f_xu_v_f32m2(vuint16m1_t src, size_t vl) {
   return vfwcvt_f_xu_v_f32m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv8f32.nxv8i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -95,7 +86,6 @@ vfloat32m4_t test_vfwcvt_f_xu_v_f32m4(vuint16m2_t src, size_t vl) {
   return vfwcvt_f_xu_v_f32m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv16f32.nxv16i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -105,7 +95,6 @@ vfloat32m8_t test_vfwcvt_f_xu_v_f32m8(vuint16m4_t src, size_t vl) {
   return vfwcvt_f_xu_v_f32m8(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -115,7 +104,6 @@ vint64m1_t test_vfwcvt_x_f_v_i64m1(vfloat32mf2_t src, size_t vl) {
   return vfwcvt_x_f_v_i64m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -125,7 +113,6 @@ vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1(vfloat32mf2_t src, size_t vl) {
   return vfwcvt_rtz_x_f_v_i64m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -135,7 +122,6 @@ vint64m2_t test_vfwcvt_x_f_v_i64m2(vfloat32m1_t src, size_t vl) {
   return vfwcvt_x_f_v_i64m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -145,7 +131,6 @@ vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2(vfloat32m1_t src, size_t vl) {
   return vfwcvt_rtz_x_f_v_i64m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -155,7 +140,6 @@ vint64m4_t test_vfwcvt_x_f_v_i64m4(vfloat32m2_t src, size_t vl) {
   return vfwcvt_x_f_v_i64m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -165,7 +149,6 @@ vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4(vfloat32m2_t src, size_t vl) {
   return vfwcvt_rtz_x_f_v_i64m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -175,7 +158,6 @@ vint64m8_t test_vfwcvt_x_f_v_i64m8(vfloat32m4_t src, size_t vl) {
   return vfwcvt_x_f_v_i64m8(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -185,7 +167,6 @@ vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8(vfloat32m4_t src, size_t vl) {
   return vfwcvt_rtz_x_f_v_i64m8(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -195,7 +176,6 @@ vuint64m1_t test_vfwcvt_xu_f_v_u64m1(vfloat32mf2_t src, size_t vl) {
   return vfwcvt_xu_f_v_u64m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -205,7 +185,6 @@ vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1(vfloat32mf2_t src, size_t vl) {
   return vfwcvt_rtz_xu_f_v_u64m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -215,7 +194,6 @@ vuint64m2_t test_vfwcvt_xu_f_v_u64m2(vfloat32m1_t src, size_t vl) {
   return vfwcvt_xu_f_v_u64m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -225,7 +203,6 @@ vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2(vfloat32m1_t src, size_t vl) {
   return vfwcvt_rtz_xu_f_v_u64m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -235,7 +212,6 @@ vuint64m4_t test_vfwcvt_xu_f_v_u64m4(vfloat32m2_t src, size_t vl) {
   return vfwcvt_xu_f_v_u64m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -245,7 +221,6 @@ vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4(vfloat32m2_t src, size_t vl) {
   return vfwcvt_rtz_xu_f_v_u64m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -255,7 +230,6 @@ vuint64m8_t test_vfwcvt_xu_f_v_u64m8(vfloat32m4_t src, size_t vl) {
   return vfwcvt_xu_f_v_u64m8(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -265,7 +239,6 @@ vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8(vfloat32m4_t src, size_t vl) {
   return vfwcvt_rtz_xu_f_v_u64m8(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -275,7 +248,6 @@ vfloat64m1_t test_vfwcvt_f_x_v_f64m1(vint32mf2_t src, size_t vl) {
   return vfwcvt_f_x_v_f64m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.x.v.nxv2f64.nxv2i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -285,7 +257,6 @@ vfloat64m2_t test_vfwcvt_f_x_v_f64m2(vint32m1_t src, size_t vl) {
   return vfwcvt_f_x_v_f64m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.x.v.nxv4f64.nxv4i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -295,7 +266,6 @@ vfloat64m4_t test_vfwcvt_f_x_v_f64m4(vint32m2_t src, size_t vl) {
   return vfwcvt_f_x_v_f64m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.x.v.nxv8f64.nxv8i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -305,7 +275,6 @@ vfloat64m8_t test_vfwcvt_f_x_v_f64m8(vint32m4_t src, size_t vl) {
   return vfwcvt_f_x_v_f64m8(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -315,7 +284,6 @@ vfloat64m1_t test_vfwcvt_f_xu_v_f64m1(vuint32mf2_t src, size_t vl) {
   return vfwcvt_f_xu_v_f64m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv2f64.nxv2i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -325,7 +293,6 @@ vfloat64m2_t test_vfwcvt_f_xu_v_f64m2(vuint32m1_t src, size_t vl) {
   return vfwcvt_f_xu_v_f64m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv4f64.nxv4i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -335,7 +302,6 @@ vfloat64m4_t test_vfwcvt_f_xu_v_f64m4(vuint32m2_t src, size_t vl) {
   return vfwcvt_f_xu_v_f64m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv8f64.nxv8i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -345,7 +311,6 @@ vfloat64m8_t test_vfwcvt_f_xu_v_f64m8(vuint32m4_t src, size_t vl) {
   return vfwcvt_f_xu_v_f64m8(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -355,7 +320,6 @@ vfloat64m1_t test_vfwcvt_f_f_v_f64m1(vfloat32mf2_t src, size_t vl) {
   return vfwcvt_f_f_v_f64m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -365,7 +329,6 @@ vfloat64m2_t test_vfwcvt_f_f_v_f64m2(vfloat32m1_t src, size_t vl) {
   return vfwcvt_f_f_v_f64m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.f.v.nxv4f64.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -375,7 +338,6 @@ vfloat64m4_t test_vfwcvt_f_f_v_f64m4(vfloat32m2_t src, size_t vl) {
   return vfwcvt_f_f_v_f64m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.f.v.nxv8f64.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -385,7 +347,6 @@ vfloat64m8_t test_vfwcvt_f_f_v_f64m8(vfloat32m4_t src, size_t vl) {
   return vfwcvt_f_f_v_f64m8(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -397,7 +358,6 @@ vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_m(vbool64_t mask,
   return vfwcvt_f_x_v_f32mf2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f32.nxv2i16.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -408,7 +368,6 @@ vfloat32m1_t test_vfwcvt_f_x_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
   return vfwcvt_f_x_v_f32m1_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f32.nxv4i16.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -419,7 +378,6 @@ vfloat32m2_t test_vfwcvt_f_x_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
   return vfwcvt_f_x_v_f32m2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f32.nxv8i16.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -430,7 +388,6 @@ vfloat32m4_t test_vfwcvt_f_x_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
   return vfwcvt_f_x_v_f32m4_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f32.nxv16i16.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -441,7 +398,6 @@ vfloat32m8_t test_vfwcvt_f_x_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
   return vfwcvt_f_x_v_f32m8_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f32.nxv1i16.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -453,7 +409,6 @@ vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_m(vbool64_t mask,
   return vfwcvt_f_xu_v_f32mf2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f32.nxv2i16.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -464,7 +419,6 @@ vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
   return vfwcvt_f_xu_v_f32m1_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f32.nxv4i16.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -475,7 +429,6 @@ vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
   return vfwcvt_f_xu_v_f32m2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f32.nxv8i16.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -486,7 +439,6 @@ vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
   return vfwcvt_f_xu_v_f32m4_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f32.nxv16i16.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -497,7 +449,6 @@ vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
   return vfwcvt_f_xu_v_f32m8_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -508,7 +459,6 @@ vint64m1_t test_vfwcvt_x_f_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
   return vfwcvt_x_f_v_i64m1_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -519,7 +469,6 @@ vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
   return vfwcvt_rtz_x_f_v_i64m1_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -530,7 +479,6 @@ vint64m2_t test_vfwcvt_x_f_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
   return vfwcvt_x_f_v_i64m2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -541,7 +489,6 @@ vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
   return vfwcvt_rtz_x_f_v_i64m2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -552,7 +499,6 @@ vint64m4_t test_vfwcvt_x_f_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
   return vfwcvt_x_f_v_i64m4_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -563,7 +509,6 @@ vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
   return vfwcvt_rtz_x_f_v_i64m4_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -574,7 +519,6 @@ vint64m8_t test_vfwcvt_x_f_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
   return vfwcvt_x_f_v_i64m8_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -585,7 +529,6 @@ vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
   return vfwcvt_rtz_x_f_v_i64m8_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -596,7 +539,6 @@ vuint64m1_t test_vfwcvt_xu_f_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
   return vfwcvt_xu_f_v_u64m1_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -608,7 +550,6 @@ vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_m(vbool64_t mask,
   return vfwcvt_rtz_xu_f_v_u64m1_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -619,7 +560,6 @@ vuint64m2_t test_vfwcvt_xu_f_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
   return vfwcvt_xu_f_v_u64m2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -631,7 +571,6 @@ vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_m(vbool32_t mask,
   return vfwcvt_rtz_xu_f_v_u64m2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -642,7 +581,6 @@ vuint64m4_t test_vfwcvt_xu_f_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
   return vfwcvt_xu_f_v_u64m4_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -654,7 +592,6 @@ vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_m(vbool16_t mask,
   return vfwcvt_rtz_xu_f_v_u64m4_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -665,7 +602,6 @@ vuint64m8_t test_vfwcvt_xu_f_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
   return vfwcvt_xu_f_v_u64m8_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -676,7 +612,6 @@ vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
   return vfwcvt_rtz_xu_f_v_u64m8_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -687,7 +622,6 @@ vfloat64m1_t test_vfwcvt_f_x_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
   return vfwcvt_f_x_v_f64m1_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f64.nxv2i32.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -698,7 +632,6 @@ vfloat64m2_t test_vfwcvt_f_x_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
   return vfwcvt_f_x_v_f64m2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f64.nxv4i32.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -709,7 +642,6 @@ vfloat64m4_t test_vfwcvt_f_x_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
   return vfwcvt_f_x_v_f64m4_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f64.nxv8i32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -720,7 +652,6 @@ vfloat64m8_t test_vfwcvt_f_x_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
   return vfwcvt_f_x_v_f64m8_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -731,7 +662,6 @@ vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
   return vfwcvt_f_xu_v_f64m1_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f64.nxv2i32.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -742,7 +672,6 @@ vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
   return vfwcvt_f_xu_v_f64m2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f64.nxv4i32.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -753,7 +682,6 @@ vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
   return vfwcvt_f_xu_v_f64m4_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f64.nxv8i32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -764,7 +692,6 @@ vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
   return vfwcvt_f_xu_v_f64m8_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -775,7 +702,6 @@ vfloat64m1_t test_vfwcvt_f_f_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
   return vfwcvt_f_f_v_f64m1_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f64.nxv2f32.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -786,7 +712,6 @@ vfloat64m2_t test_vfwcvt_f_f_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
   return vfwcvt_f_f_v_f64m2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f64.nxv4f32.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -797,7 +722,6 @@ vfloat64m4_t test_vfwcvt_f_f_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
   return vfwcvt_f_f_v_f64m4_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f64.nxv8f32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmacc.c
index a1d421c51ba21..9874057f8b7aa 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmacc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmacc.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmacc.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vfloat64m1_t test_vfwmacc_vv_f64m1(vfloat64m1_t acc, vfloat32mf2_t op1,
   return vfwmacc_vv_f64m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmacc.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -27,7 +25,6 @@ vfloat64m1_t test_vfwmacc_vf_f64m1(vfloat64m1_t acc, float op1,
   return vfwmacc_vf_f64m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmacc.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -38,7 +35,6 @@ vfloat64m2_t test_vfwmacc_vv_f64m2(vfloat64m2_t acc, vfloat32m1_t op1,
   return vfwmacc_vv_f64m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmacc.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -49,7 +45,6 @@ vfloat64m2_t test_vfwmacc_vf_f64m2(vfloat64m2_t acc, float op1,
   return vfwmacc_vf_f64m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmacc.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -60,7 +55,6 @@ vfloat64m4_t test_vfwmacc_vv_f64m4(vfloat64m4_t acc, vfloat32m2_t op1,
   return vfwmacc_vv_f64m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmacc.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -71,7 +65,6 @@ vfloat64m4_t test_vfwmacc_vf_f64m4(vfloat64m4_t acc, float op1,
   return vfwmacc_vf_f64m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmacc.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -82,7 +75,6 @@ vfloat64m8_t test_vfwmacc_vv_f64m8(vfloat64m8_t acc, vfloat32m4_t op1,
   return vfwmacc_vv_f64m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmacc.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -93,7 +85,6 @@ vfloat64m8_t test_vfwmacc_vf_f64m8(vfloat64m8_t acc, float op1,
   return vfwmacc_vf_f64m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -105,7 +96,6 @@ vfloat64m1_t test_vfwmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
   return vfwmacc_vv_f64m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmacc.mask.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -116,7 +106,6 @@ vfloat64m1_t test_vfwmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
   return vfwmacc_vf_f64m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmacc.mask.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -128,7 +117,6 @@ vfloat64m2_t test_vfwmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
   return vfwmacc_vv_f64m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmacc.mask.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -139,7 +127,6 @@ vfloat64m2_t test_vfwmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
   return vfwmacc_vf_f64m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmacc.mask.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -151,7 +138,6 @@ vfloat64m4_t test_vfwmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
   return vfwmacc_vv_f64m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmacc.mask.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -162,7 +148,6 @@ vfloat64m4_t test_vfwmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
   return vfwmacc_vf_f64m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmacc.mask.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -174,7 +159,6 @@ vfloat64m8_t test_vfwmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
   return vfwmacc_vv_f64m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmacc.mask.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmsac.c
index f87dd97051f04..b02a1feed2191 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmsac.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmsac.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmsac.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vfloat64m1_t test_vfwmsac_vv_f64m1(vfloat64m1_t acc, vfloat32mf2_t op1,
   return vfwmsac_vv_f64m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmsac.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -27,7 +25,6 @@ vfloat64m1_t test_vfwmsac_vf_f64m1(vfloat64m1_t acc, float op1,
   return vfwmsac_vf_f64m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmsac.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -38,7 +35,6 @@ vfloat64m2_t test_vfwmsac_vv_f64m2(vfloat64m2_t acc, vfloat32m1_t op1,
   return vfwmsac_vv_f64m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmsac.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -49,7 +45,6 @@ vfloat64m2_t test_vfwmsac_vf_f64m2(vfloat64m2_t acc, float op1,
   return vfwmsac_vf_f64m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmsac.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -60,7 +55,6 @@ vfloat64m4_t test_vfwmsac_vv_f64m4(vfloat64m4_t acc, vfloat32m2_t op1,
   return vfwmsac_vv_f64m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmsac.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -71,7 +65,6 @@ vfloat64m4_t test_vfwmsac_vf_f64m4(vfloat64m4_t acc, float op1,
   return vfwmsac_vf_f64m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmsac.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -82,7 +75,6 @@ vfloat64m8_t test_vfwmsac_vv_f64m8(vfloat64m8_t acc, vfloat32m4_t op1,
   return vfwmsac_vv_f64m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmsac.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -93,7 +85,6 @@ vfloat64m8_t test_vfwmsac_vf_f64m8(vfloat64m8_t acc, float op1,
   return vfwmsac_vf_f64m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -105,7 +96,6 @@ vfloat64m1_t test_vfwmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
   return vfwmsac_vv_f64m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmsac.mask.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -116,7 +106,6 @@ vfloat64m1_t test_vfwmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
   return vfwmsac_vf_f64m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmsac.mask.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -128,7 +117,6 @@ vfloat64m2_t test_vfwmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
   return vfwmsac_vv_f64m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmsac.mask.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -139,7 +127,6 @@ vfloat64m2_t test_vfwmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
   return vfwmsac_vf_f64m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmsac.mask.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -151,7 +138,6 @@ vfloat64m4_t test_vfwmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
   return vfwmsac_vv_f64m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmsac.mask.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -162,7 +148,6 @@ vfloat64m4_t test_vfwmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
   return vfwmsac_vf_f64m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmsac.mask.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -174,7 +159,6 @@ vfloat64m8_t test_vfwmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
   return vfwmsac_vv_f64m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmsac.mask.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmul.c
index c28e08858368f..2c7734983aa28 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmul.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vfloat64m1_t test_vfwmul_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2,
   return vfwmul_vv_f64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -26,7 +24,6 @@ vfloat64m1_t test_vfwmul_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) {
   return vfwmul_vf_f64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -37,7 +34,6 @@ vfloat64m2_t test_vfwmul_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2,
   return vfwmul_vv_f64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -47,7 +43,6 @@ vfloat64m2_t test_vfwmul_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) {
   return vfwmul_vf_f64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -58,7 +53,6 @@ vfloat64m4_t test_vfwmul_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2,
   return vfwmul_vv_f64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -68,7 +62,6 @@ vfloat64m4_t test_vfwmul_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) {
   return vfwmul_vf_f64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -79,7 +72,6 @@ vfloat64m8_t test_vfwmul_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2,
   return vfwmul_vv_f64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -89,7 +81,6 @@ vfloat64m8_t test_vfwmul_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) {
   return vfwmul_vf_f64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -101,7 +92,6 @@ vfloat64m1_t test_vfwmul_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
   return vfwmul_vv_f64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -112,7 +102,6 @@ vfloat64m1_t test_vfwmul_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
   return vfwmul_vf_f64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -124,7 +113,6 @@ vfloat64m2_t test_vfwmul_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
   return vfwmul_vv_f64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.f32.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -135,7 +123,6 @@ vfloat64m2_t test_vfwmul_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
   return vfwmul_vf_f64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -147,7 +134,6 @@ vfloat64m4_t test_vfwmul_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
   return vfwmul_vv_f64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.f32.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -158,7 +144,6 @@ vfloat64m4_t test_vfwmul_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
   return vfwmul_vf_f64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -170,7 +155,6 @@ vfloat64m8_t test_vfwmul_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
   return vfwmul_vv_f64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.f32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmacc.c
index 07edd16100940..1a1530d41056a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmacc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmacc.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vfloat64m1_t test_vfwnmacc_vv_f64m1(vfloat64m1_t acc, vfloat32mf2_t op1,
   return vfwnmacc_vv_f64m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -27,7 +25,6 @@ vfloat64m1_t test_vfwnmacc_vf_f64m1(vfloat64m1_t acc, float op1,
   return vfwnmacc_vf_f64m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -38,7 +35,6 @@ vfloat64m2_t test_vfwnmacc_vv_f64m2(vfloat64m2_t acc, vfloat32m1_t op1,
   return vfwnmacc_vv_f64m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -49,7 +45,6 @@ vfloat64m2_t test_vfwnmacc_vf_f64m2(vfloat64m2_t acc, float op1,
   return vfwnmacc_vf_f64m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -60,7 +55,6 @@ vfloat64m4_t test_vfwnmacc_vv_f64m4(vfloat64m4_t acc, vfloat32m2_t op1,
   return vfwnmacc_vv_f64m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -71,7 +65,6 @@ vfloat64m4_t test_vfwnmacc_vf_f64m4(vfloat64m4_t acc, float op1,
   return vfwnmacc_vf_f64m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -82,7 +75,6 @@ vfloat64m8_t test_vfwnmacc_vv_f64m8(vfloat64m8_t acc, vfloat32m4_t op1,
   return vfwnmacc_vv_f64m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -93,7 +85,6 @@ vfloat64m8_t test_vfwnmacc_vf_f64m8(vfloat64m8_t acc, float op1,
   return vfwnmacc_vf_f64m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -105,7 +96,6 @@ vfloat64m1_t test_vfwnmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
   return vfwnmacc_vv_f64m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.mask.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -116,7 +106,6 @@ vfloat64m1_t test_vfwnmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
   return vfwnmacc_vf_f64m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.mask.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -128,7 +117,6 @@ vfloat64m2_t test_vfwnmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
   return vfwnmacc_vv_f64m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.mask.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -139,7 +127,6 @@ vfloat64m2_t test_vfwnmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
   return vfwnmacc_vf_f64m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.mask.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -151,7 +138,6 @@ vfloat64m4_t test_vfwnmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
   return vfwnmacc_vv_f64m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.mask.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -162,7 +148,6 @@ vfloat64m4_t test_vfwnmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
   return vfwnmacc_vf_f64m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.mask.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -174,7 +159,6 @@ vfloat64m8_t test_vfwnmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
   return vfwnmacc_vv_f64m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.mask.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmsac.c
index 9f85020b247c5..ae2c8d1085298 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmsac.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmsac.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vfloat64m1_t test_vfwnmsac_vv_f64m1(vfloat64m1_t acc, vfloat32mf2_t op1,
   return vfwnmsac_vv_f64m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -27,7 +25,6 @@ vfloat64m1_t test_vfwnmsac_vf_f64m1(vfloat64m1_t acc, float op1,
   return vfwnmsac_vf_f64m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -38,7 +35,6 @@ vfloat64m2_t test_vfwnmsac_vv_f64m2(vfloat64m2_t acc, vfloat32m1_t op1,
   return vfwnmsac_vv_f64m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -49,7 +45,6 @@ vfloat64m2_t test_vfwnmsac_vf_f64m2(vfloat64m2_t acc, float op1,
   return vfwnmsac_vf_f64m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -60,7 +55,6 @@ vfloat64m4_t test_vfwnmsac_vv_f64m4(vfloat64m4_t acc, vfloat32m2_t op1,
   return vfwnmsac_vv_f64m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -71,7 +65,6 @@ vfloat64m4_t test_vfwnmsac_vf_f64m4(vfloat64m4_t acc, float op1,
   return vfwnmsac_vf_f64m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -82,7 +75,6 @@ vfloat64m8_t test_vfwnmsac_vv_f64m8(vfloat64m8_t acc, vfloat32m4_t op1,
   return vfwnmsac_vv_f64m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -93,7 +85,6 @@ vfloat64m8_t test_vfwnmsac_vf_f64m8(vfloat64m8_t acc, float op1,
   return vfwnmsac_vf_f64m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -105,7 +96,6 @@ vfloat64m1_t test_vfwnmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
   return vfwnmsac_vv_f64m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.mask.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -116,7 +106,6 @@ vfloat64m1_t test_vfwnmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
   return vfwnmsac_vf_f64m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.mask.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -128,7 +117,6 @@ vfloat64m2_t test_vfwnmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
   return vfwnmsac_vv_f64m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.mask.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -139,7 +127,6 @@ vfloat64m2_t test_vfwnmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
   return vfwnmsac_vf_f64m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.mask.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -151,7 +138,6 @@ vfloat64m4_t test_vfwnmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
   return vfwnmsac_vv_f64m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.mask.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -162,7 +148,6 @@ vfloat64m4_t test_vfwnmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
   return vfwnmsac_vf_f64m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.mask.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -174,7 +159,6 @@ vfloat64m8_t test_vfwnmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
   return vfwnmsac_vv_f64m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.mask.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredosum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredosum.c
index b39938ab425f5..39078a1fe9a33 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredosum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredosum.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -17,7 +16,6 @@ vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1(vfloat64m1_t dst,
   return vfwredosum_vs_f32mf2_f64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m1_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -29,7 +27,6 @@ vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1(vfloat64m1_t dst,
   return vfwredosum_vs_f32m1_f64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m2_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -41,7 +38,6 @@ vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1(vfloat64m1_t dst,
   return vfwredosum_vs_f32m2_f64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m4_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -53,7 +49,6 @@ vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1(vfloat64m1_t dst,
   return vfwredosum_vs_f32m4_f64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m8_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -65,7 +60,6 @@ vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1(vfloat64m1_t dst,
   return vfwredosum_vs_f32m8_f64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -77,7 +71,6 @@ vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
   return vfwredosum_vs_f32mf2_f64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m1_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -89,7 +82,6 @@ vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
   return vfwredosum_vs_f32m1_f64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m2_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -101,7 +93,6 @@ vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
   return vfwredosum_vs_f32m2_f64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m4_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -113,7 +104,6 @@ vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
   return vfwredosum_vs_f32m4_f64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m8_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredsum.c
index 399a24ce2463f..89447ad8dc9fe 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredsum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredsum.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfwredsum_vs_f32mf2_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -17,7 +16,6 @@ vfloat64m1_t test_vfwredsum_vs_f32mf2_f64m1(vfloat64m1_t dst,
   return vfwredsum_vs_f32mf2_f64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m1_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -29,7 +27,6 @@ vfloat64m1_t test_vfwredsum_vs_f32m1_f64m1(vfloat64m1_t dst,
   return vfwredsum_vs_f32m1_f64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m2_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -41,7 +38,6 @@ vfloat64m1_t test_vfwredsum_vs_f32m2_f64m1(vfloat64m1_t dst,
   return vfwredsum_vs_f32m2_f64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m4_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -53,7 +49,6 @@ vfloat64m1_t test_vfwredsum_vs_f32m4_f64m1(vfloat64m1_t dst,
   return vfwredsum_vs_f32m4_f64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m8_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -65,7 +60,6 @@ vfloat64m1_t test_vfwredsum_vs_f32m8_f64m1(vfloat64m1_t dst,
   return vfwredsum_vs_f32m8_f64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwredsum_vs_f32mf2_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -77,7 +71,6 @@ vfloat64m1_t test_vfwredsum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
   return vfwredsum_vs_f32mf2_f64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m1_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -89,7 +82,6 @@ vfloat64m1_t test_vfwredsum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
   return vfwredsum_vs_f32m1_f64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m2_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -101,7 +93,6 @@ vfloat64m1_t test_vfwredsum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
   return vfwredsum_vs_f32m2_f64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m4_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -113,7 +104,6 @@ vfloat64m1_t test_vfwredsum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
   return vfwredsum_vs_f32m4_f64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m8_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwsub.c
index 1aa709af037af..8eb6a199e149b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwsub.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vfloat64m1_t test_vfwsub_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2,
   return vfwsub_vv_f64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -26,7 +24,6 @@ vfloat64m1_t test_vfwsub_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) {
   return vfwsub_vf_f64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -37,7 +34,6 @@ vfloat64m1_t test_vfwsub_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2,
   return vfwsub_wv_f64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.f32.i64(<vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -47,7 +43,6 @@ vfloat64m1_t test_vfwsub_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) {
   return vfwsub_wf_f64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -58,7 +53,6 @@ vfloat64m2_t test_vfwsub_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2,
   return vfwsub_vv_f64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -68,7 +62,6 @@ vfloat64m2_t test_vfwsub_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) {
   return vfwsub_vf_f64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -79,7 +72,6 @@ vfloat64m2_t test_vfwsub_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2,
   return vfwsub_wv_f64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.f32.i64(<vscale x 2 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -89,7 +81,6 @@ vfloat64m2_t test_vfwsub_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) {
   return vfwsub_wf_f64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -100,7 +91,6 @@ vfloat64m4_t test_vfwsub_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2,
   return vfwsub_vv_f64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -110,7 +100,6 @@ vfloat64m4_t test_vfwsub_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) {
   return vfwsub_vf_f64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -121,7 +110,6 @@ vfloat64m4_t test_vfwsub_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2,
   return vfwsub_wv_f64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.f32.i64(<vscale x 4 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -131,7 +119,6 @@ vfloat64m4_t test_vfwsub_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) {
   return vfwsub_wf_f64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -142,7 +129,6 @@ vfloat64m8_t test_vfwsub_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2,
   return vfwsub_vv_f64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -152,7 +138,6 @@ vfloat64m8_t test_vfwsub_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) {
   return vfwsub_vf_f64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -163,7 +148,6 @@ vfloat64m8_t test_vfwsub_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2,
   return vfwsub_wv_f64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.f32.i64(<vscale x 8 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -173,7 +157,6 @@ vfloat64m8_t test_vfwsub_wf_f64m8(vfloat64m8_t op1, float op2, size_t vl) {
   return vfwsub_wf_f64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -185,7 +168,6 @@ vfloat64m1_t test_vfwsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
   return vfwsub_vv_f64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -196,7 +178,6 @@ vfloat64m1_t test_vfwsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
   return vfwsub_vf_f64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -208,7 +189,6 @@ vfloat64m1_t test_vfwsub_wv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
   return vfwsub_wv_f64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.f32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -219,7 +199,6 @@ vfloat64m1_t test_vfwsub_wf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
   return vfwsub_wf_f64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -231,7 +210,6 @@ vfloat64m2_t test_vfwsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
   return vfwsub_vv_f64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.f32.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -242,7 +220,6 @@ vfloat64m2_t test_vfwsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
   return vfwsub_vf_f64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.mask.nxv2f64.nxv2f32.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -254,7 +231,6 @@ vfloat64m2_t test_vfwsub_wv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
   return vfwsub_wv_f64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.mask.nxv2f64.f32.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -265,7 +241,6 @@ vfloat64m2_t test_vfwsub_wf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
   return vfwsub_wf_f64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -277,7 +252,6 @@ vfloat64m4_t test_vfwsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
   return vfwsub_vv_f64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.f32.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -288,7 +262,6 @@ vfloat64m4_t test_vfwsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
   return vfwsub_vf_f64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.mask.nxv4f64.nxv4f32.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -300,7 +273,6 @@ vfloat64m4_t test_vfwsub_wv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
   return vfwsub_wv_f64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.mask.nxv4f64.f32.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -311,7 +283,6 @@ vfloat64m4_t test_vfwsub_wf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
   return vfwsub_wf_f64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -323,7 +294,6 @@ vfloat64m8_t test_vfwsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
   return vfwsub_vv_f64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.f32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -334,7 +304,6 @@ vfloat64m8_t test_vfwsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
   return vfwsub_vf_f64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f64.nxv8f32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -346,7 +315,6 @@ vfloat64m8_t test_vfwsub_wv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
   return vfwsub_wv_f64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f64.f32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vid.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vid.c
index 7297ddd9580c7..7c1f89b88daf1 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vid.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vid.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vid.nxv1i8.i64(i64 [[VL:%.*]])
@@ -12,7 +11,6 @@
 //
 vuint8mf8_t test_vid_v_u8mf8(size_t vl) { return vid_v_u8mf8(vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vid.nxv2i8.i64(i64 [[VL:%.*]])
@@ -20,7 +18,6 @@ vuint8mf8_t test_vid_v_u8mf8(size_t vl) { return vid_v_u8mf8(vl); }
 //
 vuint8mf4_t test_vid_v_u8mf4(size_t vl) { return vid_v_u8mf4(vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vid.nxv4i8.i64(i64 [[VL:%.*]])
@@ -28,7 +25,6 @@ vuint8mf4_t test_vid_v_u8mf4(size_t vl) { return vid_v_u8mf4(vl); }
 //
 vuint8mf2_t test_vid_v_u8mf2(size_t vl) { return vid_v_u8mf2(vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vid.nxv8i8.i64(i64 [[VL:%.*]])
@@ -36,7 +32,6 @@ vuint8mf2_t test_vid_v_u8mf2(size_t vl) { return vid_v_u8mf2(vl); }
 //
 vuint8m1_t test_vid_v_u8m1(size_t vl) { return vid_v_u8m1(vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vid.nxv16i8.i64(i64 [[VL:%.*]])
@@ -44,7 +39,6 @@ vuint8m1_t test_vid_v_u8m1(size_t vl) { return vid_v_u8m1(vl); }
 //
 vuint8m2_t test_vid_v_u8m2(size_t vl) { return vid_v_u8m2(vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vid.nxv32i8.i64(i64 [[VL:%.*]])
@@ -52,7 +46,6 @@ vuint8m2_t test_vid_v_u8m2(size_t vl) { return vid_v_u8m2(vl); }
 //
 vuint8m4_t test_vid_v_u8m4(size_t vl) { return vid_v_u8m4(vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vid.nxv64i8.i64(i64 [[VL:%.*]])
@@ -60,7 +53,6 @@ vuint8m4_t test_vid_v_u8m4(size_t vl) { return vid_v_u8m4(vl); }
 //
 vuint8m8_t test_vid_v_u8m8(size_t vl) { return vid_v_u8m8(vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vid.nxv1i16.i64(i64 [[VL:%.*]])
@@ -68,7 +60,6 @@ vuint8m8_t test_vid_v_u8m8(size_t vl) { return vid_v_u8m8(vl); }
 //
 vuint16mf4_t test_vid_v_u16mf4(size_t vl) { return vid_v_u16mf4(vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vid.nxv2i16.i64(i64 [[VL:%.*]])
@@ -76,7 +67,6 @@ vuint16mf4_t test_vid_v_u16mf4(size_t vl) { return vid_v_u16mf4(vl); }
 //
 vuint16mf2_t test_vid_v_u16mf2(size_t vl) { return vid_v_u16mf2(vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vid.nxv4i16.i64(i64 [[VL:%.*]])
@@ -84,7 +74,6 @@ vuint16mf2_t test_vid_v_u16mf2(size_t vl) { return vid_v_u16mf2(vl); }
 //
 vuint16m1_t test_vid_v_u16m1(size_t vl) { return vid_v_u16m1(vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vid.nxv8i16.i64(i64 [[VL:%.*]])
@@ -92,7 +81,6 @@ vuint16m1_t test_vid_v_u16m1(size_t vl) { return vid_v_u16m1(vl); }
 //
 vuint16m2_t test_vid_v_u16m2(size_t vl) { return vid_v_u16m2(vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vid.nxv16i16.i64(i64 [[VL:%.*]])
@@ -100,7 +88,6 @@ vuint16m2_t test_vid_v_u16m2(size_t vl) { return vid_v_u16m2(vl); }
 //
 vuint16m4_t test_vid_v_u16m4(size_t vl) { return vid_v_u16m4(vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vid.nxv32i16.i64(i64 [[VL:%.*]])
@@ -108,7 +95,6 @@ vuint16m4_t test_vid_v_u16m4(size_t vl) { return vid_v_u16m4(vl); }
 //
 vuint16m8_t test_vid_v_u16m8(size_t vl) { return vid_v_u16m8(vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vid.nxv1i32.i64(i64 [[VL:%.*]])
@@ -116,7 +102,6 @@ vuint16m8_t test_vid_v_u16m8(size_t vl) { return vid_v_u16m8(vl); }
 //
 vuint32mf2_t test_vid_v_u32mf2(size_t vl) { return vid_v_u32mf2(vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vid.nxv2i32.i64(i64 [[VL:%.*]])
@@ -124,7 +109,6 @@ vuint32mf2_t test_vid_v_u32mf2(size_t vl) { return vid_v_u32mf2(vl); }
 //
 vuint32m1_t test_vid_v_u32m1(size_t vl) { return vid_v_u32m1(vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vid.nxv4i32.i64(i64 [[VL:%.*]])
@@ -132,7 +116,6 @@ vuint32m1_t test_vid_v_u32m1(size_t vl) { return vid_v_u32m1(vl); }
 //
 vuint32m2_t test_vid_v_u32m2(size_t vl) { return vid_v_u32m2(vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vid.nxv8i32.i64(i64 [[VL:%.*]])
@@ -140,7 +123,6 @@ vuint32m2_t test_vid_v_u32m2(size_t vl) { return vid_v_u32m2(vl); }
 //
 vuint32m4_t test_vid_v_u32m4(size_t vl) { return vid_v_u32m4(vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vid.nxv16i32.i64(i64 [[VL:%.*]])
@@ -148,7 +130,6 @@ vuint32m4_t test_vid_v_u32m4(size_t vl) { return vid_v_u32m4(vl); }
 //
 vuint32m8_t test_vid_v_u32m8(size_t vl) { return vid_v_u32m8(vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vid.nxv1i64.i64(i64 [[VL:%.*]])
@@ -156,7 +137,6 @@ vuint32m8_t test_vid_v_u32m8(size_t vl) { return vid_v_u32m8(vl); }
 //
 vuint64m1_t test_vid_v_u64m1(size_t vl) { return vid_v_u64m1(vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vid.nxv2i64.i64(i64 [[VL:%.*]])
@@ -164,7 +144,6 @@ vuint64m1_t test_vid_v_u64m1(size_t vl) { return vid_v_u64m1(vl); }
 //
 vuint64m2_t test_vid_v_u64m2(size_t vl) { return vid_v_u64m2(vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vid.nxv4i64.i64(i64 [[VL:%.*]])
@@ -172,7 +151,6 @@ vuint64m2_t test_vid_v_u64m2(size_t vl) { return vid_v_u64m2(vl); }
 //
 vuint64m4_t test_vid_v_u64m4(size_t vl) { return vid_v_u64m4(vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vid.nxv8i64.i64(i64 [[VL:%.*]])
@@ -180,7 +158,6 @@ vuint64m4_t test_vid_v_u64m4(size_t vl) { return vid_v_u64m4(vl); }
 //
 vuint64m8_t test_vid_v_u64m8(size_t vl) { return vid_v_u64m8(vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -191,7 +168,6 @@ vuint8mf8_t test_vid_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
   return vid_v_u8mf8_m(mask, maskedoff, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vid.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -202,7 +178,6 @@ vuint8mf4_t test_vid_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
   return vid_v_u8mf4_m(mask, maskedoff, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vid.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -213,7 +188,6 @@ vuint8mf2_t test_vid_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
   return vid_v_u8mf2_m(mask, maskedoff, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vid.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -223,7 +197,6 @@ vuint8m1_t test_vid_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, size_t vl) {
   return vid_v_u8m1_m(mask, maskedoff, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vid.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -233,7 +206,6 @@ vuint8m2_t test_vid_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, size_t vl) {
   return vid_v_u8m2_m(mask, maskedoff, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vid.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -243,7 +215,6 @@ vuint8m4_t test_vid_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, size_t vl) {
   return vid_v_u8m4_m(mask, maskedoff, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vid.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -253,7 +224,6 @@ vuint8m8_t test_vid_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, size_t vl) {
   return vid_v_u8m8_m(mask, maskedoff, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vid.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -264,7 +234,6 @@ vuint16mf4_t test_vid_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
   return vid_v_u16mf4_m(mask, maskedoff, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vid.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -275,7 +244,6 @@ vuint16mf2_t test_vid_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
   return vid_v_u16mf2_m(mask, maskedoff, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vid.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -286,7 +254,6 @@ vuint16m1_t test_vid_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
   return vid_v_u16m1_m(mask, maskedoff, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vid.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -297,7 +264,6 @@ vuint16m2_t test_vid_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
   return vid_v_u16m2_m(mask, maskedoff, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vid.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -308,7 +274,6 @@ vuint16m4_t test_vid_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
   return vid_v_u16m4_m(mask, maskedoff, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vid.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -319,7 +284,6 @@ vuint16m8_t test_vid_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
   return vid_v_u16m8_m(mask, maskedoff, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vid.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -330,7 +294,6 @@ vuint32mf2_t test_vid_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
   return vid_v_u32mf2_m(mask, maskedoff, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vid.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -341,7 +304,6 @@ vuint32m1_t test_vid_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
   return vid_v_u32m1_m(mask, maskedoff, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vid.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -352,7 +314,6 @@ vuint32m2_t test_vid_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
   return vid_v_u32m2_m(mask, maskedoff, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vid.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -363,7 +324,6 @@ vuint32m4_t test_vid_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
   return vid_v_u32m4_m(mask, maskedoff, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vid.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -374,7 +334,6 @@ vuint32m8_t test_vid_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
   return vid_v_u32m8_m(mask, maskedoff, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vid.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -385,7 +344,6 @@ vuint64m1_t test_vid_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
   return vid_v_u64m1_m(mask, maskedoff, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vid.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -396,7 +354,6 @@ vuint64m2_t test_vid_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
   return vid_v_u64m2_m(mask, maskedoff, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vid.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -407,7 +364,6 @@ vuint64m4_t test_vid_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
   return vid_v_u64m4_m(mask, maskedoff, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vid.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/viota.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/viota.c
index 30a758413e4f0..cf983abbbaf8b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/viota.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/viota.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.viota.nxv1i8.i64(<vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vuint8mf8_t test_viota_m_u8mf8(vbool64_t op1, size_t vl) {
   return viota_m_u8mf8(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.viota.nxv2i8.i64(<vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vuint8mf4_t test_viota_m_u8mf4(vbool32_t op1, size_t vl) {
   return viota_m_u8mf4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.viota.nxv4i8.i64(<vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vuint8mf2_t test_viota_m_u8mf2(vbool16_t op1, size_t vl) {
   return viota_m_u8mf2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.viota.nxv8i8.i64(<vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vuint8m1_t test_viota_m_u8m1(vbool8_t op1, size_t vl) {
   return viota_m_u8m1(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.viota.nxv16i8.i64(<vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vuint8m2_t test_viota_m_u8m2(vbool4_t op1, size_t vl) {
   return viota_m_u8m2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.viota.nxv32i8.i64(<vscale x 32 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vuint8m4_t test_viota_m_u8m4(vbool2_t op1, size_t vl) {
   return viota_m_u8m4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.viota.nxv64i8.i64(<vscale x 64 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vuint8m8_t test_viota_m_u8m8(vbool1_t op1, size_t vl) {
   return viota_m_u8m8(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.viota.nxv1i16.i64(<vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vuint16mf4_t test_viota_m_u16mf4(vbool64_t op1, size_t vl) {
   return viota_m_u16mf4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.viota.nxv2i16.i64(<vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vuint16mf2_t test_viota_m_u16mf2(vbool32_t op1, size_t vl) {
   return viota_m_u16mf2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.viota.nxv4i16.i64(<vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vuint16m1_t test_viota_m_u16m1(vbool16_t op1, size_t vl) {
   return viota_m_u16m1(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.viota.nxv8i16.i64(<vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vuint16m2_t test_viota_m_u16m2(vbool8_t op1, size_t vl) {
   return viota_m_u16m2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.viota.nxv16i16.i64(<vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vuint16m4_t test_viota_m_u16m4(vbool4_t op1, size_t vl) {
   return viota_m_u16m4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.viota.nxv32i16.i64(<vscale x 32 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vuint16m8_t test_viota_m_u16m8(vbool2_t op1, size_t vl) {
   return viota_m_u16m8(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.viota.nxv1i32.i64(<vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vuint32mf2_t test_viota_m_u32mf2(vbool64_t op1, size_t vl) {
   return viota_m_u32mf2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.viota.nxv2i32.i64(<vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vuint32m1_t test_viota_m_u32m1(vbool32_t op1, size_t vl) {
   return viota_m_u32m1(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.viota.nxv4i32.i64(<vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vuint32m2_t test_viota_m_u32m2(vbool16_t op1, size_t vl) {
   return viota_m_u32m2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.viota.nxv8i32.i64(<vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vuint32m4_t test_viota_m_u32m4(vbool8_t op1, size_t vl) {
   return viota_m_u32m4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.viota.nxv16i32.i64(<vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vuint32m8_t test_viota_m_u32m8(vbool4_t op1, size_t vl) {
   return viota_m_u32m8(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.viota.nxv1i64.i64(<vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vuint64m1_t test_viota_m_u64m1(vbool64_t op1, size_t vl) {
   return viota_m_u64m1(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.viota.nxv2i64.i64(<vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vuint64m2_t test_viota_m_u64m2(vbool32_t op1, size_t vl) {
   return viota_m_u64m2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.viota.nxv4i64.i64(<vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vuint64m4_t test_viota_m_u64m4(vbool16_t op1, size_t vl) {
   return viota_m_u64m4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.viota.nxv8i64.i64(<vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vuint64m8_t test_viota_m_u64m8(vbool8_t op1, size_t vl) {
   return viota_m_u64m8(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -235,7 +212,6 @@ vuint8mf8_t test_viota_m_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
   return viota_m_u8mf8_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.viota.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -246,7 +222,6 @@ vuint8mf4_t test_viota_m_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
   return viota_m_u8mf4_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.viota.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -257,7 +232,6 @@ vuint8mf2_t test_viota_m_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
   return viota_m_u8mf2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.viota.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -268,7 +242,6 @@ vuint8m1_t test_viota_m_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
   return viota_m_u8m1_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.viota.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -279,7 +252,6 @@ vuint8m2_t test_viota_m_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
   return viota_m_u8m2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.viota.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -290,7 +262,6 @@ vuint8m4_t test_viota_m_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
   return viota_m_u8m4_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.viota.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -301,7 +272,6 @@ vuint8m8_t test_viota_m_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
   return viota_m_u8m8_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.viota.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -312,7 +282,6 @@ vuint16mf4_t test_viota_m_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
   return viota_m_u16mf4_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.viota.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -323,7 +292,6 @@ vuint16mf2_t test_viota_m_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
   return viota_m_u16mf2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.viota.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -334,7 +302,6 @@ vuint16m1_t test_viota_m_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
   return viota_m_u16m1_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.viota.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -345,7 +312,6 @@ vuint16m2_t test_viota_m_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
   return viota_m_u16m2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.viota.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -356,7 +322,6 @@ vuint16m4_t test_viota_m_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
   return viota_m_u16m4_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.viota.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -367,7 +332,6 @@ vuint16m8_t test_viota_m_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
   return viota_m_u16m8_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.viota.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -378,7 +342,6 @@ vuint32mf2_t test_viota_m_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
   return viota_m_u32mf2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.viota.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -389,7 +352,6 @@ vuint32m1_t test_viota_m_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
   return viota_m_u32m1_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.viota.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -400,7 +362,6 @@ vuint32m2_t test_viota_m_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
   return viota_m_u32m2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.viota.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -411,7 +372,6 @@ vuint32m4_t test_viota_m_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
   return viota_m_u32m4_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.viota.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -422,7 +382,6 @@ vuint32m8_t test_viota_m_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
   return viota_m_u32m8_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.viota.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -433,7 +392,6 @@ vuint64m1_t test_viota_m_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
   return viota_m_u64m1_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.viota.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -444,7 +402,6 @@ vuint64m2_t test_viota_m_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
   return viota_m_u64m2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.viota.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -455,7 +412,6 @@ vuint64m4_t test_viota_m_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
   return viota_m_u64m4_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.viota.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vleff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vleff.c
index 9b571afa96535..6cc41e9acc32b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vleff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vleff.c
@@ -6,7 +6,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vle8ff_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -20,7 +19,6 @@ vint8mf8_t test_vle8ff_v_i8mf8 (const int8_t *base, size_t *new_vl, size_t vl) {
   return vle8ff_v_i8mf8(base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle8ff_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -34,7 +32,6 @@ vint8mf4_t test_vle8ff_v_i8mf4 (const int8_t *base, size_t *new_vl, size_t vl) {
   return vle8ff_v_i8mf4(base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle8ff_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -48,7 +45,6 @@ vint8mf2_t test_vle8ff_v_i8mf2 (const int8_t *base, size_t *new_vl, size_t vl) {
   return vle8ff_v_i8mf2(base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle8ff_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -62,7 +58,6 @@ vint8m1_t test_vle8ff_v_i8m1 (const int8_t *base, size_t *new_vl, size_t vl) {
   return vle8ff_v_i8m1(base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle8ff_v_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -76,7 +71,6 @@ vint8m2_t test_vle8ff_v_i8m2 (const int8_t *base, size_t *new_vl, size_t vl) {
   return vle8ff_v_i8m2(base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle8ff_v_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
@@ -90,7 +84,6 @@ vint8m4_t test_vle8ff_v_i8m4 (const int8_t *base, size_t *new_vl, size_t vl) {
   return vle8ff_v_i8m4(base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle8ff_v_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
@@ -104,7 +97,6 @@ vint8m8_t test_vle8ff_v_i8m8 (const int8_t *base, size_t *new_vl, size_t vl) {
   return vle8ff_v_i8m8(base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle8ff_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -118,7 +110,6 @@ vuint8mf8_t test_vle8ff_v_u8mf8 (const uint8_t *base, size_t *new_vl, size_t vl)
   return vle8ff_v_u8mf8(base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle8ff_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -132,7 +123,6 @@ vuint8mf4_t test_vle8ff_v_u8mf4 (const uint8_t *base, size_t *new_vl, size_t vl)
   return vle8ff_v_u8mf4(base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle8ff_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -146,7 +136,6 @@ vuint8mf2_t test_vle8ff_v_u8mf2 (const uint8_t *base, size_t *new_vl, size_t vl)
   return vle8ff_v_u8mf2(base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle8ff_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -160,7 +149,6 @@ vuint8m1_t test_vle8ff_v_u8m1 (const uint8_t *base, size_t *new_vl, size_t vl) {
   return vle8ff_v_u8m1(base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle8ff_v_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -174,7 +162,6 @@ vuint8m2_t test_vle8ff_v_u8m2 (const uint8_t *base, size_t *new_vl, size_t vl) {
   return vle8ff_v_u8m2(base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle8ff_v_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
@@ -188,7 +175,6 @@ vuint8m4_t test_vle8ff_v_u8m4 (const uint8_t *base, size_t *new_vl, size_t vl) {
   return vle8ff_v_u8m4(base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle8ff_v_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
@@ -202,7 +188,6 @@ vuint8m8_t test_vle8ff_v_u8m8 (const uint8_t *base, size_t *new_vl, size_t vl) {
   return vle8ff_v_u8m8(base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle8ff_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -216,7 +201,6 @@ vint8mf8_t test_vle8ff_v_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, const in
   return vle8ff_v_i8mf8_m(mask, maskedoff, base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle8ff_v_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -230,7 +214,6 @@ vint8mf4_t test_vle8ff_v_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, const in
   return vle8ff_v_i8mf4_m(mask, maskedoff, base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle8ff_v_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -244,7 +227,6 @@ vint8mf2_t test_vle8ff_v_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, const in
   return vle8ff_v_i8mf2_m(mask, maskedoff, base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle8ff_v_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -258,7 +240,6 @@ vint8m1_t test_vle8ff_v_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, const int8_t
   return vle8ff_v_i8m1_m(mask, maskedoff, base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle8ff_v_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -272,7 +253,6 @@ vint8m2_t test_vle8ff_v_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, const int8_t
   return vle8ff_v_i8m2_m(mask, maskedoff, base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle8ff_v_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
@@ -286,7 +266,6 @@ vint8m4_t test_vle8ff_v_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, const int8_t
   return vle8ff_v_i8m4_m(mask, maskedoff, base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle8ff_v_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
@@ -300,7 +279,6 @@ vint8m8_t test_vle8ff_v_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, const int8_t
   return vle8ff_v_i8m8_m(mask, maskedoff, base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle8ff_v_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -314,7 +292,6 @@ vuint8mf8_t test_vle8ff_v_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, const
   return vle8ff_v_u8mf8_m(mask, maskedoff, base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle8ff_v_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -328,7 +305,6 @@ vuint8mf4_t test_vle8ff_v_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, const
   return vle8ff_v_u8mf4_m(mask, maskedoff, base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle8ff_v_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -342,7 +318,6 @@ vuint8mf2_t test_vle8ff_v_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, const
   return vle8ff_v_u8mf2_m(mask, maskedoff, base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle8ff_v_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -356,7 +331,6 @@ vuint8m1_t test_vle8ff_v_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, const uint
   return vle8ff_v_u8m1_m(mask, maskedoff, base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle8ff_v_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -370,7 +344,6 @@ vuint8m2_t test_vle8ff_v_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, const uint
   return vle8ff_v_u8m2_m(mask, maskedoff, base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle8ff_v_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
@@ -384,7 +357,6 @@ vuint8m4_t test_vle8ff_v_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, const uint
   return vle8ff_v_u8m4_m(mask, maskedoff, base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle8ff_v_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
@@ -398,7 +370,6 @@ vuint8m8_t test_vle8ff_v_u8m8_m (vbool1_t mask, vuint8m8_t maskedoff, const uint
   return vle8ff_v_u8m8_m(mask, maskedoff, base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle16ff_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -412,7 +383,6 @@ vint16mf4_t test_vle16ff_v_i16mf4 (const int16_t *base, size_t *new_vl, size_t v
   return vle16ff_v_i16mf4(base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle16ff_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -426,7 +396,6 @@ vint16mf2_t test_vle16ff_v_i16mf2 (const int16_t *base, size_t *new_vl, size_t v
   return vle16ff_v_i16mf2(base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle16ff_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -440,7 +409,6 @@ vint16m1_t test_vle16ff_v_i16m1 (const int16_t *base, size_t *new_vl, size_t vl)
   return vle16ff_v_i16m1(base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle16ff_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -454,7 +422,6 @@ vint16m2_t test_vle16ff_v_i16m2 (const int16_t *base, size_t *new_vl, size_t vl)
   return vle16ff_v_i16m2(base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle16ff_v_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -468,7 +435,6 @@ vint16m4_t test_vle16ff_v_i16m4 (const int16_t *base, size_t *new_vl, size_t vl)
   return vle16ff_v_i16m4(base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle16ff_v_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
@@ -482,7 +448,6 @@ vint16m8_t test_vle16ff_v_i16m8 (const int16_t *base, size_t *new_vl, size_t vl)
   return vle16ff_v_i16m8(base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle16ff_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -496,7 +461,6 @@ vuint16mf4_t test_vle16ff_v_u16mf4 (const uint16_t *base, size_t *new_vl, size_t
   return vle16ff_v_u16mf4(base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle16ff_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -510,7 +474,6 @@ vuint16mf2_t test_vle16ff_v_u16mf2 (const uint16_t *base, size_t *new_vl, size_t
   return vle16ff_v_u16mf2(base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle16ff_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -524,7 +487,6 @@ vuint16m1_t test_vle16ff_v_u16m1 (const uint16_t *base, size_t *new_vl, size_t v
   return vle16ff_v_u16m1(base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle16ff_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -538,7 +500,6 @@ vuint16m2_t test_vle16ff_v_u16m2 (const uint16_t *base, size_t *new_vl, size_t v
   return vle16ff_v_u16m2(base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle16ff_v_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -552,7 +513,6 @@ vuint16m4_t test_vle16ff_v_u16m4 (const uint16_t *base, size_t *new_vl, size_t v
   return vle16ff_v_u16m4(base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle16ff_v_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
@@ -566,7 +526,6 @@ vuint16m8_t test_vle16ff_v_u16m8 (const uint16_t *base, size_t *new_vl, size_t v
   return vle16ff_v_u16m8(base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle16ff_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -580,7 +539,6 @@ vint16mf4_t test_vle16ff_v_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, cons
   return vle16ff_v_i16mf4_m(mask, maskedoff, base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle16ff_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -594,7 +552,6 @@ vint16mf2_t test_vle16ff_v_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, cons
   return vle16ff_v_i16mf2_m(mask, maskedoff, base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle16ff_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -608,7 +565,6 @@ vint16m1_t test_vle16ff_v_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, const i
   return vle16ff_v_i16m1_m(mask, maskedoff, base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle16ff_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -622,7 +578,6 @@ vint16m2_t test_vle16ff_v_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, const in
   return vle16ff_v_i16m2_m(mask, maskedoff, base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle16ff_v_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -636,7 +591,6 @@ vint16m4_t test_vle16ff_v_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, const in
   return vle16ff_v_i16m4_m(mask, maskedoff, base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle16ff_v_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
@@ -650,7 +604,6 @@ vint16m8_t test_vle16ff_v_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, const in
   return vle16ff_v_i16m8_m(mask, maskedoff, base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle16ff_v_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -664,7 +617,6 @@ vuint16mf4_t test_vle16ff_v_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, co
   return vle16ff_v_u16mf4_m(mask, maskedoff, base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle16ff_v_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -678,7 +630,6 @@ vuint16mf2_t test_vle16ff_v_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, co
   return vle16ff_v_u16mf2_m(mask, maskedoff, base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle16ff_v_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -692,7 +643,6 @@ vuint16m1_t test_vle16ff_v_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, const
   return vle16ff_v_u16m1_m(mask, maskedoff, base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle16ff_v_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -706,7 +656,6 @@ vuint16m2_t test_vle16ff_v_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, const
   return vle16ff_v_u16m2_m(mask, maskedoff, base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle16ff_v_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -720,7 +669,6 @@ vuint16m4_t test_vle16ff_v_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, const
   return vle16ff_v_u16m4_m(mask, maskedoff, base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle16ff_v_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
@@ -734,7 +682,6 @@ vuint16m8_t test_vle16ff_v_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, const
   return vle16ff_v_u16m8_m(mask, maskedoff, base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle32ff_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -748,7 +695,6 @@ vint32mf2_t test_vle32ff_v_i32mf2 (const int32_t *base, size_t *new_vl, size_t v
   return vle32ff_v_i32mf2(base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle32ff_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -762,7 +708,6 @@ vint32m1_t test_vle32ff_v_i32m1 (const int32_t *base, size_t *new_vl, size_t vl)
   return vle32ff_v_i32m1(base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle32ff_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -776,7 +721,6 @@ vint32m2_t test_vle32ff_v_i32m2 (const int32_t *base, size_t *new_vl, size_t vl)
   return vle32ff_v_i32m2(base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle32ff_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -790,7 +734,6 @@ vint32m4_t test_vle32ff_v_i32m4 (const int32_t *base, size_t *new_vl, size_t vl)
   return vle32ff_v_i32m4(base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle32ff_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -804,7 +747,6 @@ vint32m8_t test_vle32ff_v_i32m8 (const int32_t *base, size_t *new_vl, size_t vl)
   return vle32ff_v_i32m8(base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle32ff_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -818,7 +760,6 @@ vuint32mf2_t test_vle32ff_v_u32mf2 (const uint32_t *base, size_t *new_vl, size_t
   return vle32ff_v_u32mf2(base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle32ff_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -832,7 +773,6 @@ vuint32m1_t test_vle32ff_v_u32m1 (const uint32_t *base, size_t *new_vl, size_t v
   return vle32ff_v_u32m1(base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle32ff_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -846,7 +786,6 @@ vuint32m2_t test_vle32ff_v_u32m2 (const uint32_t *base, size_t *new_vl, size_t v
   return vle32ff_v_u32m2(base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle32ff_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -860,7 +799,6 @@ vuint32m4_t test_vle32ff_v_u32m4 (const uint32_t *base, size_t *new_vl, size_t v
   return vle32ff_v_u32m4(base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle32ff_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -874,7 +812,6 @@ vuint32m8_t test_vle32ff_v_u32m8 (const uint32_t *base, size_t *new_vl, size_t v
   return vle32ff_v_u32m8(base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle32ff_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -888,7 +825,6 @@ vfloat32mf2_t test_vle32ff_v_f32mf2 (const float *base, size_t *new_vl, size_t v
   return vle32ff_v_f32mf2(base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle32ff_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -902,7 +838,6 @@ vfloat32m1_t test_vle32ff_v_f32m1 (const float *base, size_t *new_vl, size_t vl)
   return vle32ff_v_f32m1(base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle32ff_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -916,7 +851,6 @@ vfloat32m2_t test_vle32ff_v_f32m2 (const float *base, size_t *new_vl, size_t vl)
   return vle32ff_v_f32m2(base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle32ff_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -930,7 +864,6 @@ vfloat32m4_t test_vle32ff_v_f32m4 (const float *base, size_t *new_vl, size_t vl)
   return vle32ff_v_f32m4(base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle32ff_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
@@ -944,7 +877,6 @@ vfloat32m8_t test_vle32ff_v_f32m8 (const float *base, size_t *new_vl, size_t vl)
   return vle32ff_v_f32m8(base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle32ff_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -958,7 +890,6 @@ vint32mf2_t test_vle32ff_v_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, cons
   return vle32ff_v_i32mf2_m(mask, maskedoff, base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle32ff_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -972,7 +903,6 @@ vint32m1_t test_vle32ff_v_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, const i
   return vle32ff_v_i32m1_m(mask, maskedoff, base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle32ff_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -986,7 +916,6 @@ vint32m2_t test_vle32ff_v_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, const i
   return vle32ff_v_i32m2_m(mask, maskedoff, base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle32ff_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1000,7 +929,6 @@ vint32m4_t test_vle32ff_v_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, const in
   return vle32ff_v_i32m4_m(mask, maskedoff, base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle32ff_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1014,7 +942,6 @@ vint32m8_t test_vle32ff_v_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, const in
   return vle32ff_v_i32m8_m(mask, maskedoff, base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle32ff_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1028,7 +955,6 @@ vuint32mf2_t test_vle32ff_v_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, co
   return vle32ff_v_u32mf2_m(mask, maskedoff, base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle32ff_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1042,7 +968,6 @@ vuint32m1_t test_vle32ff_v_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, const
   return vle32ff_v_u32m1_m(mask, maskedoff, base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle32ff_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1056,7 +981,6 @@ vuint32m2_t test_vle32ff_v_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, const
   return vle32ff_v_u32m2_m(mask, maskedoff, base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle32ff_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1070,7 +994,6 @@ vuint32m4_t test_vle32ff_v_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, const
   return vle32ff_v_u32m4_m(mask, maskedoff, base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle32ff_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1084,7 +1007,6 @@ vuint32m8_t test_vle32ff_v_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, const
   return vle32ff_v_u32m8_m(mask, maskedoff, base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle32ff_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -1098,7 +1020,6 @@ vfloat32mf2_t test_vle32ff_v_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff,
   return vle32ff_v_f32mf2_m(mask, maskedoff, base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle32ff_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -1112,7 +1033,6 @@ vfloat32m1_t test_vle32ff_v_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, con
   return vle32ff_v_f32m1_m(mask, maskedoff, base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle32ff_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -1126,7 +1046,6 @@ vfloat32m2_t test_vle32ff_v_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, con
   return vle32ff_v_f32m2_m(mask, maskedoff, base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle32ff_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -1140,7 +1059,6 @@ vfloat32m4_t test_vle32ff_v_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, cons
   return vle32ff_v_f32m4_m(mask, maskedoff, base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle32ff_v_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
@@ -1154,7 +1072,6 @@ vfloat32m8_t test_vle32ff_v_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, cons
   return vle32ff_v_f32m8_m(mask, maskedoff, base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle64ff_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1168,7 +1085,6 @@ vint64m1_t test_vle64ff_v_i64m1 (const int64_t *base, size_t *new_vl, size_t vl)
   return vle64ff_v_i64m1(base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle64ff_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1182,7 +1098,6 @@ vint64m2_t test_vle64ff_v_i64m2 (const int64_t *base, size_t *new_vl, size_t vl)
   return vle64ff_v_i64m2(base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle64ff_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1196,7 +1111,6 @@ vint64m4_t test_vle64ff_v_i64m4 (const int64_t *base, size_t *new_vl, size_t vl)
   return vle64ff_v_i64m4(base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle64ff_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1210,7 +1124,6 @@ vint64m8_t test_vle64ff_v_i64m8 (const int64_t *base, size_t *new_vl, size_t vl)
   return vle64ff_v_i64m8(base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle64ff_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1224,7 +1137,6 @@ vuint64m1_t test_vle64ff_v_u64m1 (const uint64_t *base, size_t *new_vl, size_t v
   return vle64ff_v_u64m1(base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle64ff_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1238,7 +1150,6 @@ vuint64m2_t test_vle64ff_v_u64m2 (const uint64_t *base, size_t *new_vl, size_t v
   return vle64ff_v_u64m2(base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle64ff_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1252,7 +1163,6 @@ vuint64m4_t test_vle64ff_v_u64m4 (const uint64_t *base, size_t *new_vl, size_t v
   return vle64ff_v_u64m4(base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle64ff_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1266,7 +1176,6 @@ vuint64m8_t test_vle64ff_v_u64m8 (const uint64_t *base, size_t *new_vl, size_t v
   return vle64ff_v_u64m8(base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle64ff_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -1280,7 +1189,6 @@ vfloat64m1_t test_vle64ff_v_f64m1 (const double *base, size_t *new_vl, size_t vl
   return vle64ff_v_f64m1(base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle64ff_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -1294,7 +1202,6 @@ vfloat64m2_t test_vle64ff_v_f64m2 (const double *base, size_t *new_vl, size_t vl
   return vle64ff_v_f64m2(base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle64ff_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -1308,7 +1215,6 @@ vfloat64m4_t test_vle64ff_v_f64m4 (const double *base, size_t *new_vl, size_t vl
   return vle64ff_v_f64m4(base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle64ff_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
@@ -1322,7 +1228,6 @@ vfloat64m8_t test_vle64ff_v_f64m8 (const double *base, size_t *new_vl, size_t vl
   return vle64ff_v_f64m8(base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle64ff_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1336,7 +1241,6 @@ vint64m1_t test_vle64ff_v_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, const i
   return vle64ff_v_i64m1_m(mask, maskedoff, base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle64ff_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1350,7 +1254,6 @@ vint64m2_t test_vle64ff_v_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, const i
   return vle64ff_v_i64m2_m(mask, maskedoff, base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle64ff_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1364,7 +1267,6 @@ vint64m4_t test_vle64ff_v_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, const i
   return vle64ff_v_i64m4_m(mask, maskedoff, base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle64ff_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1378,7 +1280,6 @@ vint64m8_t test_vle64ff_v_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, const in
   return vle64ff_v_i64m8_m(mask, maskedoff, base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle64ff_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1392,7 +1293,6 @@ vuint64m1_t test_vle64ff_v_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, const
   return vle64ff_v_u64m1_m(mask, maskedoff, base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle64ff_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1406,7 +1306,6 @@ vuint64m2_t test_vle64ff_v_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, const
   return vle64ff_v_u64m2_m(mask, maskedoff, base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle64ff_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1420,7 +1319,6 @@ vuint64m4_t test_vle64ff_v_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, const
   return vle64ff_v_u64m4_m(mask, maskedoff, base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle64ff_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1434,7 +1332,6 @@ vuint64m8_t test_vle64ff_v_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, const
   return vle64ff_v_u64m8_m(mask, maskedoff, base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle64ff_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -1448,7 +1345,6 @@ vfloat64m1_t test_vle64ff_v_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, con
   return vle64ff_v_f64m1_m(mask, maskedoff, base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle64ff_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -1462,7 +1358,6 @@ vfloat64m2_t test_vle64ff_v_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, con
   return vle64ff_v_f64m2_m(mask, maskedoff, base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle64ff_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -1476,7 +1371,6 @@ vfloat64m4_t test_vle64ff_v_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, con
   return vle64ff_v_f64m4_m(mask, maskedoff, base, new_vl, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vle64ff_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlmul.c
index 3aeb401b4e75a..e781c23806749 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlmul.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.insert.nxv2i8.nxv1i8(<vscale x 2 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
@@ -15,7 +14,6 @@ vint8mf4_t test_vlmul_ext_v_i8mf8_i8mf4(vint8mf8_t op1) {
   return vlmul_ext_v_i8mf8_i8mf4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.experimental.vector.insert.nxv4i8.nxv1i8(<vscale x 4 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
@@ -25,7 +23,6 @@ vint8mf2_t test_vlmul_ext_v_i8mf8_i8mf2(vint8mf8_t op1) {
   return vlmul_ext_v_i8mf8_i8mf2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.insert.nxv8i8.nxv1i8(<vscale x 8 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
@@ -35,7 +32,6 @@ vint8m1_t test_vlmul_ext_v_i8mf8_i8m1(vint8mf8_t op1) {
   return vlmul_ext_v_i8mf8_i8m1(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.nxv1i8(<vscale x 16 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
@@ -45,7 +41,6 @@ vint8m2_t test_vlmul_ext_v_i8mf8_i8m2(vint8mf8_t op1) {
   return vlmul_ext_v_i8mf8_i8m2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv1i8(<vscale x 32 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
@@ -55,7 +50,6 @@ vint8m4_t test_vlmul_ext_v_i8mf8_i8m4(vint8mf8_t op1) {
   return vlmul_ext_v_i8mf8_i8m4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv1i8(<vscale x 64 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
@@ -65,7 +59,6 @@ vint8m8_t test_vlmul_ext_v_i8mf8_i8m8(vint8mf8_t op1) {
   return vlmul_ext_v_i8mf8_i8m8(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf4_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.experimental.vector.insert.nxv4i8.nxv2i8(<vscale x 4 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 0)
@@ -75,7 +68,6 @@ vint8mf2_t test_vlmul_ext_v_i8mf4_i8mf2(vint8mf4_t op1) {
   return vlmul_ext_v_i8mf4_i8mf2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf4_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.insert.nxv8i8.nxv2i8(<vscale x 8 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 0)
@@ -85,7 +77,6 @@ vint8m1_t test_vlmul_ext_v_i8mf4_i8m1(vint8mf4_t op1) {
   return vlmul_ext_v_i8mf4_i8m1(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf4_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.nxv2i8(<vscale x 16 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 0)
@@ -95,7 +86,6 @@ vint8m2_t test_vlmul_ext_v_i8mf4_i8m2(vint8mf4_t op1) {
   return vlmul_ext_v_i8mf4_i8m2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf4_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv2i8(<vscale x 32 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 0)
@@ -105,7 +95,6 @@ vint8m4_t test_vlmul_ext_v_i8mf4_i8m4(vint8mf4_t op1) {
   return vlmul_ext_v_i8mf4_i8m4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf4_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv2i8(<vscale x 64 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 0)
@@ -115,7 +104,6 @@ vint8m8_t test_vlmul_ext_v_i8mf4_i8m8(vint8mf4_t op1) {
   return vlmul_ext_v_i8mf4_i8m8(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf2_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.insert.nxv8i8.nxv4i8(<vscale x 8 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 0)
@@ -125,7 +113,6 @@ vint8m1_t test_vlmul_ext_v_i8mf2_i8m1(vint8mf2_t op1) {
   return vlmul_ext_v_i8mf2_i8m1(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf2_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.nxv4i8(<vscale x 16 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 0)
@@ -135,7 +122,6 @@ vint8m2_t test_vlmul_ext_v_i8mf2_i8m2(vint8mf2_t op1) {
   return vlmul_ext_v_i8mf2_i8m2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf2_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv4i8(<vscale x 32 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 0)
@@ -145,7 +131,6 @@ vint8m4_t test_vlmul_ext_v_i8mf2_i8m4(vint8mf2_t op1) {
   return vlmul_ext_v_i8mf2_i8m4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf2_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv4i8(<vscale x 64 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 0)
@@ -155,7 +140,6 @@ vint8m8_t test_vlmul_ext_v_i8mf2_i8m8(vint8mf2_t op1) {
   return vlmul_ext_v_i8mf2_i8m8(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m1_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.nxv8i8(<vscale x 16 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 0)
@@ -165,7 +149,6 @@ vint8m2_t test_vlmul_ext_v_i8m1_i8m2(vint8m1_t op1) {
   return vlmul_ext_v_i8m1_i8m2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m1_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv8i8(<vscale x 32 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 0)
@@ -175,7 +158,6 @@ vint8m4_t test_vlmul_ext_v_i8m1_i8m4(vint8m1_t op1) {
   return vlmul_ext_v_i8m1_i8m4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m1_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv8i8(<vscale x 64 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 0)
@@ -185,7 +167,6 @@ vint8m8_t test_vlmul_ext_v_i8m1_i8m8(vint8m1_t op1) {
   return vlmul_ext_v_i8m1_i8m8(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m2_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 0)
@@ -195,7 +176,6 @@ vint8m4_t test_vlmul_ext_v_i8m2_i8m4(vint8m2_t op1) {
   return vlmul_ext_v_i8m2_i8m4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m2_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 0)
@@ -205,7 +185,6 @@ vint8m8_t test_vlmul_ext_v_i8m2_i8m8(vint8m2_t op1) {
   return vlmul_ext_v_i8m2_i8m8(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m4_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv32i8(<vscale x 64 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 0)
@@ -215,7 +194,6 @@ vint8m8_t test_vlmul_ext_v_i8m4_i8m8(vint8m4_t op1) {
   return vlmul_ext_v_i8m4_i8m8(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf4_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.experimental.vector.insert.nxv2i16.nxv1i16(<vscale x 2 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 0)
@@ -225,7 +203,6 @@ vint16mf2_t test_vlmul_ext_v_i16mf4_i16mf2(vint16mf4_t op1) {
   return vlmul_ext_v_i16mf4_i16mf2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf4_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.insert.nxv4i16.nxv1i16(<vscale x 4 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 0)
@@ -235,7 +212,6 @@ vint16m1_t test_vlmul_ext_v_i16mf4_i16m1(vint16mf4_t op1) {
   return vlmul_ext_v_i16mf4_i16m1(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf4_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.nxv1i16(<vscale x 8 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 0)
@@ -245,7 +221,6 @@ vint16m2_t test_vlmul_ext_v_i16mf4_i16m2(vint16mf4_t op1) {
   return vlmul_ext_v_i16mf4_i16m2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf4_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.insert.nxv16i16.nxv1i16(<vscale x 16 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 0)
@@ -255,7 +230,6 @@ vint16m4_t test_vlmul_ext_v_i16mf4_i16m4(vint16mf4_t op1) {
   return vlmul_ext_v_i16mf4_i16m4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf4_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv1i16(<vscale x 32 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 0)
@@ -265,7 +239,6 @@ vint16m8_t test_vlmul_ext_v_i16mf4_i16m8(vint16mf4_t op1) {
   return vlmul_ext_v_i16mf4_i16m8(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf2_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.insert.nxv4i16.nxv2i16(<vscale x 4 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 0)
@@ -275,7 +248,6 @@ vint16m1_t test_vlmul_ext_v_i16mf2_i16m1(vint16mf2_t op1) {
   return vlmul_ext_v_i16mf2_i16m1(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf2_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.nxv2i16(<vscale x 8 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 0)
@@ -285,7 +257,6 @@ vint16m2_t test_vlmul_ext_v_i16mf2_i16m2(vint16mf2_t op1) {
   return vlmul_ext_v_i16mf2_i16m2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf2_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.insert.nxv16i16.nxv2i16(<vscale x 16 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 0)
@@ -295,7 +266,6 @@ vint16m4_t test_vlmul_ext_v_i16mf2_i16m4(vint16mf2_t op1) {
   return vlmul_ext_v_i16mf2_i16m4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf2_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv2i16(<vscale x 32 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 0)
@@ -305,7 +275,6 @@ vint16m8_t test_vlmul_ext_v_i16mf2_i16m8(vint16mf2_t op1) {
   return vlmul_ext_v_i16mf2_i16m8(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m1_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.nxv4i16(<vscale x 8 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 0)
@@ -315,7 +284,6 @@ vint16m2_t test_vlmul_ext_v_i16m1_i16m2(vint16m1_t op1) {
   return vlmul_ext_v_i16m1_i16m2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m1_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.insert.nxv16i16.nxv4i16(<vscale x 16 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 0)
@@ -325,7 +293,6 @@ vint16m4_t test_vlmul_ext_v_i16m1_i16m4(vint16m1_t op1) {
   return vlmul_ext_v_i16m1_i16m4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m1_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv4i16(<vscale x 32 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 0)
@@ -335,7 +302,6 @@ vint16m8_t test_vlmul_ext_v_i16m1_i16m8(vint16m1_t op1) {
   return vlmul_ext_v_i16m1_i16m8(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m2_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.insert.nxv16i16.nxv8i16(<vscale x 16 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 0)
@@ -345,7 +311,6 @@ vint16m4_t test_vlmul_ext_v_i16m2_i16m4(vint16m2_t op1) {
   return vlmul_ext_v_i16m2_i16m4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m2_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv8i16(<vscale x 32 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 0)
@@ -355,7 +320,6 @@ vint16m8_t test_vlmul_ext_v_i16m2_i16m8(vint16m2_t op1) {
   return vlmul_ext_v_i16m2_i16m8(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m4_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv16i16(<vscale x 32 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 0)
@@ -365,7 +329,6 @@ vint16m8_t test_vlmul_ext_v_i16m4_i16m8(vint16m4_t op1) {
   return vlmul_ext_v_i16m4_i16m8(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_i32mf2_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.insert.nxv2i32.nxv1i32(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 0)
@@ -375,7 +338,6 @@ vint32m1_t test_vlmul_ext_v_i32mf2_i32m1(vint32mf2_t op1) {
   return vlmul_ext_v_i32mf2_i32m1(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_i32mf2_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.nxv1i32(<vscale x 4 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 0)
@@ -385,7 +347,6 @@ vint32m2_t test_vlmul_ext_v_i32mf2_i32m2(vint32mf2_t op1) {
   return vlmul_ext_v_i32mf2_i32m2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_i32mf2_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv8i32.nxv1i32(<vscale x 8 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 0)
@@ -395,7 +356,6 @@ vint32m4_t test_vlmul_ext_v_i32mf2_i32m4(vint32mf2_t op1) {
   return vlmul_ext_v_i32mf2_i32m4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_i32mf2_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv1i32(<vscale x 16 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 0)
@@ -405,7 +365,6 @@ vint32m8_t test_vlmul_ext_v_i32mf2_i32m8(vint32mf2_t op1) {
   return vlmul_ext_v_i32mf2_i32m8(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m1_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.nxv2i32(<vscale x 4 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 0)
@@ -415,7 +374,6 @@ vint32m2_t test_vlmul_ext_v_i32m1_i32m2(vint32m1_t op1) {
   return vlmul_ext_v_i32m1_i32m2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m1_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv8i32.nxv2i32(<vscale x 8 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 0)
@@ -425,7 +383,6 @@ vint32m4_t test_vlmul_ext_v_i32m1_i32m4(vint32m1_t op1) {
   return vlmul_ext_v_i32m1_i32m4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m1_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv2i32(<vscale x 16 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 0)
@@ -435,7 +392,6 @@ vint32m8_t test_vlmul_ext_v_i32m1_i32m8(vint32m1_t op1) {
   return vlmul_ext_v_i32m1_i32m8(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m2_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 0)
@@ -445,7 +401,6 @@ vint32m4_t test_vlmul_ext_v_i32m2_i32m4(vint32m2_t op1) {
   return vlmul_ext_v_i32m2_i32m4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m2_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv4i32(<vscale x 16 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 0)
@@ -455,7 +410,6 @@ vint32m8_t test_vlmul_ext_v_i32m2_i32m8(vint32m2_t op1) {
   return vlmul_ext_v_i32m2_i32m8(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m4_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv8i32(<vscale x 16 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 0)
@@ -465,7 +419,6 @@ vint32m8_t test_vlmul_ext_v_i32m4_i32m8(vint32m4_t op1) {
   return vlmul_ext_v_i32m4_i32m8(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m1_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.nxv1i64(<vscale x 2 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 0)
@@ -475,7 +428,6 @@ vint64m2_t test_vlmul_ext_v_i64m1_i64m2(vint64m1_t op1) {
   return vlmul_ext_v_i64m1_i64m2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m1_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.insert.nxv4i64.nxv1i64(<vscale x 4 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 0)
@@ -485,7 +437,6 @@ vint64m4_t test_vlmul_ext_v_i64m1_i64m4(vint64m1_t op1) {
   return vlmul_ext_v_i64m1_i64m4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m1_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv1i64(<vscale x 8 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 0)
@@ -495,7 +446,6 @@ vint64m8_t test_vlmul_ext_v_i64m1_i64m8(vint64m1_t op1) {
   return vlmul_ext_v_i64m1_i64m8(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m2_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.insert.nxv4i64.nxv2i64(<vscale x 4 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 0)
@@ -505,7 +455,6 @@ vint64m4_t test_vlmul_ext_v_i64m2_i64m4(vint64m2_t op1) {
   return vlmul_ext_v_i64m2_i64m4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m2_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv2i64(<vscale x 8 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 0)
@@ -515,7 +464,6 @@ vint64m8_t test_vlmul_ext_v_i64m2_i64m8(vint64m2_t op1) {
   return vlmul_ext_v_i64m2_i64m8(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m4_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv4i64(<vscale x 8 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 0)
@@ -525,7 +473,6 @@ vint64m8_t test_vlmul_ext_v_i64m4_i64m8(vint64m4_t op1) {
   return vlmul_ext_v_i64m4_i64m8(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.insert.nxv2i8.nxv1i8(<vscale x 2 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
@@ -535,7 +482,6 @@ vuint8mf4_t test_vlmul_ext_v_u8mf8_u8mf4(vuint8mf8_t op1) {
   return vlmul_ext_v_u8mf8_u8mf4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.experimental.vector.insert.nxv4i8.nxv1i8(<vscale x 4 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
@@ -545,7 +491,6 @@ vuint8mf2_t test_vlmul_ext_v_u8mf8_u8mf2(vuint8mf8_t op1) {
   return vlmul_ext_v_u8mf8_u8mf2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.insert.nxv8i8.nxv1i8(<vscale x 8 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
@@ -555,7 +500,6 @@ vuint8m1_t test_vlmul_ext_v_u8mf8_u8m1(vuint8mf8_t op1) {
   return vlmul_ext_v_u8mf8_u8m1(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.nxv1i8(<vscale x 16 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
@@ -565,7 +509,6 @@ vuint8m2_t test_vlmul_ext_v_u8mf8_u8m2(vuint8mf8_t op1) {
   return vlmul_ext_v_u8mf8_u8m2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv1i8(<vscale x 32 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
@@ -575,7 +518,6 @@ vuint8m4_t test_vlmul_ext_v_u8mf8_u8m4(vuint8mf8_t op1) {
   return vlmul_ext_v_u8mf8_u8m4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv1i8(<vscale x 64 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
@@ -585,7 +527,6 @@ vuint8m8_t test_vlmul_ext_v_u8mf8_u8m8(vuint8mf8_t op1) {
   return vlmul_ext_v_u8mf8_u8m8(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf4_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.experimental.vector.insert.nxv4i8.nxv2i8(<vscale x 4 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 0)
@@ -595,7 +536,6 @@ vuint8mf2_t test_vlmul_ext_v_u8mf4_u8mf2(vuint8mf4_t op1) {
   return vlmul_ext_v_u8mf4_u8mf2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf4_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.insert.nxv8i8.nxv2i8(<vscale x 8 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 0)
@@ -605,7 +545,6 @@ vuint8m1_t test_vlmul_ext_v_u8mf4_u8m1(vuint8mf4_t op1) {
   return vlmul_ext_v_u8mf4_u8m1(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf4_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.nxv2i8(<vscale x 16 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 0)
@@ -615,7 +554,6 @@ vuint8m2_t test_vlmul_ext_v_u8mf4_u8m2(vuint8mf4_t op1) {
   return vlmul_ext_v_u8mf4_u8m2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf4_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv2i8(<vscale x 32 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 0)
@@ -625,7 +563,6 @@ vuint8m4_t test_vlmul_ext_v_u8mf4_u8m4(vuint8mf4_t op1) {
   return vlmul_ext_v_u8mf4_u8m4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf4_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv2i8(<vscale x 64 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 0)
@@ -635,7 +572,6 @@ vuint8m8_t test_vlmul_ext_v_u8mf4_u8m8(vuint8mf4_t op1) {
   return vlmul_ext_v_u8mf4_u8m8(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf2_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.insert.nxv8i8.nxv4i8(<vscale x 8 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 0)
@@ -645,7 +581,6 @@ vuint8m1_t test_vlmul_ext_v_u8mf2_u8m1(vuint8mf2_t op1) {
   return vlmul_ext_v_u8mf2_u8m1(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf2_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.nxv4i8(<vscale x 16 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 0)
@@ -655,7 +590,6 @@ vuint8m2_t test_vlmul_ext_v_u8mf2_u8m2(vuint8mf2_t op1) {
   return vlmul_ext_v_u8mf2_u8m2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf2_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv4i8(<vscale x 32 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 0)
@@ -665,7 +599,6 @@ vuint8m4_t test_vlmul_ext_v_u8mf2_u8m4(vuint8mf2_t op1) {
   return vlmul_ext_v_u8mf2_u8m4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf2_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv4i8(<vscale x 64 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 0)
@@ -675,7 +608,6 @@ vuint8m8_t test_vlmul_ext_v_u8mf2_u8m8(vuint8mf2_t op1) {
   return vlmul_ext_v_u8mf2_u8m8(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m1_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.nxv8i8(<vscale x 16 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 0)
@@ -685,7 +617,6 @@ vuint8m2_t test_vlmul_ext_v_u8m1_u8m2(vuint8m1_t op1) {
   return vlmul_ext_v_u8m1_u8m2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m1_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv8i8(<vscale x 32 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 0)
@@ -695,7 +626,6 @@ vuint8m4_t test_vlmul_ext_v_u8m1_u8m4(vuint8m1_t op1) {
   return vlmul_ext_v_u8m1_u8m4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m1_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv8i8(<vscale x 64 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 0)
@@ -705,7 +635,6 @@ vuint8m8_t test_vlmul_ext_v_u8m1_u8m8(vuint8m1_t op1) {
   return vlmul_ext_v_u8m1_u8m8(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m2_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 0)
@@ -715,7 +644,6 @@ vuint8m4_t test_vlmul_ext_v_u8m2_u8m4(vuint8m2_t op1) {
   return vlmul_ext_v_u8m2_u8m4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m2_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 0)
@@ -725,7 +653,6 @@ vuint8m8_t test_vlmul_ext_v_u8m2_u8m8(vuint8m2_t op1) {
   return vlmul_ext_v_u8m2_u8m8(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m4_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv32i8(<vscale x 64 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 0)
@@ -735,7 +662,6 @@ vuint8m8_t test_vlmul_ext_v_u8m4_u8m8(vuint8m4_t op1) {
   return vlmul_ext_v_u8m4_u8m8(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf4_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.experimental.vector.insert.nxv2i16.nxv1i16(<vscale x 2 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 0)
@@ -745,7 +671,6 @@ vuint16mf2_t test_vlmul_ext_v_u16mf4_u16mf2(vuint16mf4_t op1) {
   return vlmul_ext_v_u16mf4_u16mf2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf4_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.insert.nxv4i16.nxv1i16(<vscale x 4 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 0)
@@ -755,7 +680,6 @@ vuint16m1_t test_vlmul_ext_v_u16mf4_u16m1(vuint16mf4_t op1) {
   return vlmul_ext_v_u16mf4_u16m1(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf4_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.nxv1i16(<vscale x 8 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 0)
@@ -765,7 +689,6 @@ vuint16m2_t test_vlmul_ext_v_u16mf4_u16m2(vuint16mf4_t op1) {
   return vlmul_ext_v_u16mf4_u16m2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf4_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.insert.nxv16i16.nxv1i16(<vscale x 16 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 0)
@@ -775,7 +698,6 @@ vuint16m4_t test_vlmul_ext_v_u16mf4_u16m4(vuint16mf4_t op1) {
   return vlmul_ext_v_u16mf4_u16m4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf4_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv1i16(<vscale x 32 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 0)
@@ -785,7 +707,6 @@ vuint16m8_t test_vlmul_ext_v_u16mf4_u16m8(vuint16mf4_t op1) {
   return vlmul_ext_v_u16mf4_u16m8(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf2_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.insert.nxv4i16.nxv2i16(<vscale x 4 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 0)
@@ -795,7 +716,6 @@ vuint16m1_t test_vlmul_ext_v_u16mf2_u16m1(vuint16mf2_t op1) {
   return vlmul_ext_v_u16mf2_u16m1(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf2_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.nxv2i16(<vscale x 8 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 0)
@@ -805,7 +725,6 @@ vuint16m2_t test_vlmul_ext_v_u16mf2_u16m2(vuint16mf2_t op1) {
   return vlmul_ext_v_u16mf2_u16m2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf2_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.insert.nxv16i16.nxv2i16(<vscale x 16 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 0)
@@ -815,7 +734,6 @@ vuint16m4_t test_vlmul_ext_v_u16mf2_u16m4(vuint16mf2_t op1) {
   return vlmul_ext_v_u16mf2_u16m4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf2_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv2i16(<vscale x 32 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 0)
@@ -825,7 +743,6 @@ vuint16m8_t test_vlmul_ext_v_u16mf2_u16m8(vuint16mf2_t op1) {
   return vlmul_ext_v_u16mf2_u16m8(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m1_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.nxv4i16(<vscale x 8 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 0)
@@ -835,7 +752,6 @@ vuint16m2_t test_vlmul_ext_v_u16m1_u16m2(vuint16m1_t op1) {
   return vlmul_ext_v_u16m1_u16m2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m1_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.insert.nxv16i16.nxv4i16(<vscale x 16 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 0)
@@ -845,7 +761,6 @@ vuint16m4_t test_vlmul_ext_v_u16m1_u16m4(vuint16m1_t op1) {
   return vlmul_ext_v_u16m1_u16m4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m1_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv4i16(<vscale x 32 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 0)
@@ -855,7 +770,6 @@ vuint16m8_t test_vlmul_ext_v_u16m1_u16m8(vuint16m1_t op1) {
   return vlmul_ext_v_u16m1_u16m8(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m2_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.insert.nxv16i16.nxv8i16(<vscale x 16 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 0)
@@ -865,7 +779,6 @@ vuint16m4_t test_vlmul_ext_v_u16m2_u16m4(vuint16m2_t op1) {
   return vlmul_ext_v_u16m2_u16m4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m2_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv8i16(<vscale x 32 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 0)
@@ -875,7 +788,6 @@ vuint16m8_t test_vlmul_ext_v_u16m2_u16m8(vuint16m2_t op1) {
   return vlmul_ext_v_u16m2_u16m8(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m4_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv16i16(<vscale x 32 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 0)
@@ -885,7 +797,6 @@ vuint16m8_t test_vlmul_ext_v_u16m4_u16m8(vuint16m4_t op1) {
   return vlmul_ext_v_u16m4_u16m8(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u32mf2_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.insert.nxv2i32.nxv1i32(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 0)
@@ -895,7 +806,6 @@ vuint32m1_t test_vlmul_ext_v_u32mf2_u32m1(vuint32mf2_t op1) {
   return vlmul_ext_v_u32mf2_u32m1(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u32mf2_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.nxv1i32(<vscale x 4 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 0)
@@ -905,7 +815,6 @@ vuint32m2_t test_vlmul_ext_v_u32mf2_u32m2(vuint32mf2_t op1) {
   return vlmul_ext_v_u32mf2_u32m2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u32mf2_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv8i32.nxv1i32(<vscale x 8 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 0)
@@ -915,7 +824,6 @@ vuint32m4_t test_vlmul_ext_v_u32mf2_u32m4(vuint32mf2_t op1) {
   return vlmul_ext_v_u32mf2_u32m4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u32mf2_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv1i32(<vscale x 16 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 0)
@@ -925,7 +833,6 @@ vuint32m8_t test_vlmul_ext_v_u32mf2_u32m8(vuint32mf2_t op1) {
   return vlmul_ext_v_u32mf2_u32m8(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m1_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.nxv2i32(<vscale x 4 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 0)
@@ -935,7 +842,6 @@ vuint32m2_t test_vlmul_ext_v_u32m1_u32m2(vuint32m1_t op1) {
   return vlmul_ext_v_u32m1_u32m2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m1_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv8i32.nxv2i32(<vscale x 8 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 0)
@@ -945,7 +851,6 @@ vuint32m4_t test_vlmul_ext_v_u32m1_u32m4(vuint32m1_t op1) {
   return vlmul_ext_v_u32m1_u32m4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m1_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv2i32(<vscale x 16 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 0)
@@ -955,7 +860,6 @@ vuint32m8_t test_vlmul_ext_v_u32m1_u32m8(vuint32m1_t op1) {
   return vlmul_ext_v_u32m1_u32m8(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m2_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 0)
@@ -965,7 +869,6 @@ vuint32m4_t test_vlmul_ext_v_u32m2_u32m4(vuint32m2_t op1) {
   return vlmul_ext_v_u32m2_u32m4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m2_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv4i32(<vscale x 16 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 0)
@@ -975,7 +878,6 @@ vuint32m8_t test_vlmul_ext_v_u32m2_u32m8(vuint32m2_t op1) {
   return vlmul_ext_v_u32m2_u32m8(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m4_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv8i32(<vscale x 16 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 0)
@@ -985,7 +887,6 @@ vuint32m8_t test_vlmul_ext_v_u32m4_u32m8(vuint32m4_t op1) {
   return vlmul_ext_v_u32m4_u32m8(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m1_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.nxv1i64(<vscale x 2 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 0)
@@ -995,7 +896,6 @@ vuint64m2_t test_vlmul_ext_v_u64m1_u64m2(vuint64m1_t op1) {
   return vlmul_ext_v_u64m1_u64m2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m1_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.insert.nxv4i64.nxv1i64(<vscale x 4 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 0)
@@ -1005,7 +905,6 @@ vuint64m4_t test_vlmul_ext_v_u64m1_u64m4(vuint64m1_t op1) {
   return vlmul_ext_v_u64m1_u64m4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m1_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv1i64(<vscale x 8 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 0)
@@ -1015,7 +914,6 @@ vuint64m8_t test_vlmul_ext_v_u64m1_u64m8(vuint64m1_t op1) {
   return vlmul_ext_v_u64m1_u64m8(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m2_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.insert.nxv4i64.nxv2i64(<vscale x 4 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 0)
@@ -1025,7 +923,6 @@ vuint64m4_t test_vlmul_ext_v_u64m2_u64m4(vuint64m2_t op1) {
   return vlmul_ext_v_u64m2_u64m4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m2_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv2i64(<vscale x 8 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 0)
@@ -1035,7 +932,6 @@ vuint64m8_t test_vlmul_ext_v_u64m2_u64m8(vuint64m2_t op1) {
   return vlmul_ext_v_u64m2_u64m8(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m4_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv4i64(<vscale x 8 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 0)
@@ -1045,7 +941,6 @@ vuint64m8_t test_vlmul_ext_v_u64m4_u64m8(vuint64m4_t op1) {
   return vlmul_ext_v_u64m4_u64m8(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_f32mf2_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.experimental.vector.insert.nxv2f32.nxv1f32(<vscale x 2 x float> undef, <vscale x 1 x float> [[OP1:%.*]], i64 0)
@@ -1055,7 +950,6 @@ vfloat32m1_t test_vlmul_ext_v_f32mf2_f32m1(vfloat32mf2_t op1) {
   return vlmul_ext_v_f32mf2_f32m1(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_f32mf2_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.experimental.vector.insert.nxv4f32.nxv1f32(<vscale x 4 x float> undef, <vscale x 1 x float> [[OP1:%.*]], i64 0)
@@ -1065,7 +959,6 @@ vfloat32m2_t test_vlmul_ext_v_f32mf2_f32m2(vfloat32mf2_t op1) {
   return vlmul_ext_v_f32mf2_f32m2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_f32mf2_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.experimental.vector.insert.nxv8f32.nxv1f32(<vscale x 8 x float> undef, <vscale x 1 x float> [[OP1:%.*]], i64 0)
@@ -1075,7 +968,6 @@ vfloat32m4_t test_vlmul_ext_v_f32mf2_f32m4(vfloat32mf2_t op1) {
   return vlmul_ext_v_f32mf2_f32m4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_f32mf2_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.experimental.vector.insert.nxv16f32.nxv1f32(<vscale x 16 x float> undef, <vscale x 1 x float> [[OP1:%.*]], i64 0)
@@ -1085,7 +977,6 @@ vfloat32m8_t test_vlmul_ext_v_f32mf2_f32m8(vfloat32mf2_t op1) {
   return vlmul_ext_v_f32mf2_f32m8(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m1_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.experimental.vector.insert.nxv4f32.nxv2f32(<vscale x 4 x float> undef, <vscale x 2 x float> [[OP1:%.*]], i64 0)
@@ -1095,7 +986,6 @@ vfloat32m2_t test_vlmul_ext_v_f32m1_f32m2(vfloat32m1_t op1) {
   return vlmul_ext_v_f32m1_f32m2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m1_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.experimental.vector.insert.nxv8f32.nxv2f32(<vscale x 8 x float> undef, <vscale x 2 x float> [[OP1:%.*]], i64 0)
@@ -1105,7 +995,6 @@ vfloat32m4_t test_vlmul_ext_v_f32m1_f32m4(vfloat32m1_t op1) {
   return vlmul_ext_v_f32m1_f32m4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m1_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.experimental.vector.insert.nxv16f32.nxv2f32(<vscale x 16 x float> undef, <vscale x 2 x float> [[OP1:%.*]], i64 0)
@@ -1115,7 +1004,6 @@ vfloat32m8_t test_vlmul_ext_v_f32m1_f32m8(vfloat32m1_t op1) {
   return vlmul_ext_v_f32m1_f32m8(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m2_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.experimental.vector.insert.nxv8f32.nxv4f32(<vscale x 8 x float> undef, <vscale x 4 x float> [[OP1:%.*]], i64 0)
@@ -1125,7 +1013,6 @@ vfloat32m4_t test_vlmul_ext_v_f32m2_f32m4(vfloat32m2_t op1) {
   return vlmul_ext_v_f32m2_f32m4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m2_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.experimental.vector.insert.nxv16f32.nxv4f32(<vscale x 16 x float> undef, <vscale x 4 x float> [[OP1:%.*]], i64 0)
@@ -1135,7 +1022,6 @@ vfloat32m8_t test_vlmul_ext_v_f32m2_f32m8(vfloat32m2_t op1) {
   return vlmul_ext_v_f32m2_f32m8(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m4_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.experimental.vector.insert.nxv16f32.nxv8f32(<vscale x 16 x float> undef, <vscale x 8 x float> [[OP1:%.*]], i64 0)
@@ -1145,7 +1031,6 @@ vfloat32m8_t test_vlmul_ext_v_f32m4_f32m8(vfloat32m4_t op1) {
   return vlmul_ext_v_f32m4_f32m8(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m1_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.nxv1f64(<vscale x 2 x double> undef, <vscale x 1 x double> [[OP1:%.*]], i64 0)
@@ -1155,7 +1040,6 @@ vfloat64m2_t test_vlmul_ext_v_f64m1_f64m2(vfloat64m1_t op1) {
   return vlmul_ext_v_f64m1_f64m2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m1_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.experimental.vector.insert.nxv4f64.nxv1f64(<vscale x 4 x double> undef, <vscale x 1 x double> [[OP1:%.*]], i64 0)
@@ -1165,7 +1049,6 @@ vfloat64m4_t test_vlmul_ext_v_f64m1_f64m4(vfloat64m1_t op1) {
   return vlmul_ext_v_f64m1_f64m4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m1_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.experimental.vector.insert.nxv8f64.nxv1f64(<vscale x 8 x double> undef, <vscale x 1 x double> [[OP1:%.*]], i64 0)
@@ -1175,7 +1058,6 @@ vfloat64m8_t test_vlmul_ext_v_f64m1_f64m8(vfloat64m1_t op1) {
   return vlmul_ext_v_f64m1_f64m8(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m2_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.experimental.vector.insert.nxv4f64.nxv2f64(<vscale x 4 x double> undef, <vscale x 2 x double> [[OP1:%.*]], i64 0)
@@ -1185,7 +1067,6 @@ vfloat64m4_t test_vlmul_ext_v_f64m2_f64m4(vfloat64m2_t op1) {
   return vlmul_ext_v_f64m2_f64m4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m2_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.experimental.vector.insert.nxv8f64.nxv2f64(<vscale x 8 x double> undef, <vscale x 2 x double> [[OP1:%.*]], i64 0)
@@ -1195,7 +1076,6 @@ vfloat64m8_t test_vlmul_ext_v_f64m2_f64m8(vfloat64m2_t op1) {
   return vlmul_ext_v_f64m2_f64m8(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m4_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.experimental.vector.insert.nxv8f64.nxv4f64(<vscale x 8 x double> undef, <vscale x 4 x double> [[OP1:%.*]], i64 0)
@@ -1205,7 +1085,6 @@ vfloat64m8_t test_vlmul_ext_v_f64m4_f64m8(vfloat64m4_t op1) {
   return vlmul_ext_v_f64m4_f64m8(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8mf4_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.experimental.vector.extract.nxv1i8.nxv2i8(<vscale x 2 x i8> [[OP1:%.*]], i64 0)
@@ -1215,7 +1094,6 @@ vint8mf8_t test_vlmul_trunc_v_i8mf4_i8mf8(vint8mf4_t op1) {
   return vlmul_trunc_v_i8mf4_i8mf8(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8mf2_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.experimental.vector.extract.nxv1i8.nxv4i8(<vscale x 4 x i8> [[OP1:%.*]], i64 0)
@@ -1225,7 +1103,6 @@ vint8mf8_t test_vlmul_trunc_v_i8mf2_i8mf8(vint8mf2_t op1) {
   return vlmul_trunc_v_i8mf2_i8mf8(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8mf2_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.extract.nxv2i8.nxv4i8(<vscale x 4 x i8> [[OP1:%.*]], i64 0)
@@ -1235,7 +1112,6 @@ vint8mf4_t test_vlmul_trunc_v_i8mf2_i8mf4(vint8mf2_t op1) {
   return vlmul_trunc_v_i8mf2_i8mf4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m1_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.experimental.vector.extract.nxv1i8.nxv8i8(<vscale x 8 x i8> [[OP1:%.*]], i64 0)
@@ -1245,7 +1121,6 @@ vint8mf8_t test_vlmul_trunc_v_i8m1_i8mf8(vint8m1_t op1) {
   return vlmul_trunc_v_i8m1_i8mf8(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m1_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.extract.nxv2i8.nxv8i8(<vscale x 8 x i8> [[OP1:%.*]], i64 0)
@@ -1255,7 +1130,6 @@ vint8mf4_t test_vlmul_trunc_v_i8m1_i8mf4(vint8m1_t op1) {
   return vlmul_trunc_v_i8m1_i8mf4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m1_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.experimental.vector.extract.nxv4i8.nxv8i8(<vscale x 8 x i8> [[OP1:%.*]], i64 0)
@@ -1265,7 +1139,6 @@ vint8mf2_t test_vlmul_trunc_v_i8m1_i8mf2(vint8m1_t op1) {
   return vlmul_trunc_v_i8m1_i8mf2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.experimental.vector.extract.nxv1i8.nxv16i8(<vscale x 16 x i8> [[OP1:%.*]], i64 0)
@@ -1275,7 +1148,6 @@ vint8mf8_t test_vlmul_trunc_v_i8m2_i8mf8(vint8m2_t op1) {
   return vlmul_trunc_v_i8m2_i8mf8(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.extract.nxv2i8.nxv16i8(<vscale x 16 x i8> [[OP1:%.*]], i64 0)
@@ -1285,7 +1157,6 @@ vint8mf4_t test_vlmul_trunc_v_i8m2_i8mf4(vint8m2_t op1) {
   return vlmul_trunc_v_i8m2_i8mf4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.experimental.vector.extract.nxv4i8.nxv16i8(<vscale x 16 x i8> [[OP1:%.*]], i64 0)
@@ -1295,7 +1166,6 @@ vint8mf2_t test_vlmul_trunc_v_i8m2_i8mf2(vint8m2_t op1) {
   return vlmul_trunc_v_i8m2_i8mf2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv16i8(<vscale x 16 x i8> [[OP1:%.*]], i64 0)
@@ -1305,7 +1175,6 @@ vint8m1_t test_vlmul_trunc_v_i8m2_i8m1(vint8m2_t op1) {
   return vlmul_trunc_v_i8m2_i8m1(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.experimental.vector.extract.nxv1i8.nxv32i8(<vscale x 32 x i8> [[OP1:%.*]], i64 0)
@@ -1315,7 +1184,6 @@ vint8mf8_t test_vlmul_trunc_v_i8m4_i8mf8(vint8m4_t op1) {
   return vlmul_trunc_v_i8m4_i8mf8(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.extract.nxv2i8.nxv32i8(<vscale x 32 x i8> [[OP1:%.*]], i64 0)
@@ -1325,7 +1193,6 @@ vint8mf4_t test_vlmul_trunc_v_i8m4_i8mf4(vint8m4_t op1) {
   return vlmul_trunc_v_i8m4_i8mf4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.experimental.vector.extract.nxv4i8.nxv32i8(<vscale x 32 x i8> [[OP1:%.*]], i64 0)
@@ -1335,7 +1202,6 @@ vint8mf2_t test_vlmul_trunc_v_i8m4_i8mf2(vint8m4_t op1) {
   return vlmul_trunc_v_i8m4_i8mf2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv32i8(<vscale x 32 x i8> [[OP1:%.*]], i64 0)
@@ -1345,7 +1211,6 @@ vint8m1_t test_vlmul_trunc_v_i8m4_i8m1(vint8m4_t op1) {
   return vlmul_trunc_v_i8m4_i8m1(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.extract.nxv16i8.nxv32i8(<vscale x 32 x i8> [[OP1:%.*]], i64 0)
@@ -1355,7 +1220,6 @@ vint8m2_t test_vlmul_trunc_v_i8m4_i8m2(vint8m4_t op1) {
   return vlmul_trunc_v_i8m4_i8m2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.experimental.vector.extract.nxv1i8.nxv64i8(<vscale x 64 x i8> [[OP1:%.*]], i64 0)
@@ -1365,7 +1229,6 @@ vint8mf8_t test_vlmul_trunc_v_i8m8_i8mf8(vint8m8_t op1) {
   return vlmul_trunc_v_i8m8_i8mf8(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.extract.nxv2i8.nxv64i8(<vscale x 64 x i8> [[OP1:%.*]], i64 0)
@@ -1375,7 +1238,6 @@ vint8mf4_t test_vlmul_trunc_v_i8m8_i8mf4(vint8m8_t op1) {
   return vlmul_trunc_v_i8m8_i8mf4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.experimental.vector.extract.nxv4i8.nxv64i8(<vscale x 64 x i8> [[OP1:%.*]], i64 0)
@@ -1385,7 +1247,6 @@ vint8mf2_t test_vlmul_trunc_v_i8m8_i8mf2(vint8m8_t op1) {
   return vlmul_trunc_v_i8m8_i8mf2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv64i8(<vscale x 64 x i8> [[OP1:%.*]], i64 0)
@@ -1395,7 +1256,6 @@ vint8m1_t test_vlmul_trunc_v_i8m8_i8m1(vint8m8_t op1) {
   return vlmul_trunc_v_i8m8_i8m1(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[OP1:%.*]], i64 0)
@@ -1405,7 +1265,6 @@ vint8m2_t test_vlmul_trunc_v_i8m8_i8m2(vint8m8_t op1) {
   return vlmul_trunc_v_i8m8_i8m2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.extract.nxv32i8.nxv64i8(<vscale x 64 x i8> [[OP1:%.*]], i64 0)
@@ -1415,7 +1274,6 @@ vint8m4_t test_vlmul_trunc_v_i8m8_i8m4(vint8m8_t op1) {
   return vlmul_trunc_v_i8m8_i8m4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16mf2_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.experimental.vector.extract.nxv1i16.nxv2i16(<vscale x 2 x i16> [[OP1:%.*]], i64 0)
@@ -1425,7 +1283,6 @@ vint16mf4_t test_vlmul_trunc_v_i16mf2_i16mf4(vint16mf2_t op1) {
   return vlmul_trunc_v_i16mf2_i16mf4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m1_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.experimental.vector.extract.nxv1i16.nxv4i16(<vscale x 4 x i16> [[OP1:%.*]], i64 0)
@@ -1435,7 +1292,6 @@ vint16mf4_t test_vlmul_trunc_v_i16m1_i16mf4(vint16m1_t op1) {
   return vlmul_trunc_v_i16m1_i16mf4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m1_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.experimental.vector.extract.nxv2i16.nxv4i16(<vscale x 4 x i16> [[OP1:%.*]], i64 0)
@@ -1445,7 +1301,6 @@ vint16mf2_t test_vlmul_trunc_v_i16m1_i16mf2(vint16m1_t op1) {
   return vlmul_trunc_v_i16m1_i16mf2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m2_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.experimental.vector.extract.nxv1i16.nxv8i16(<vscale x 8 x i16> [[OP1:%.*]], i64 0)
@@ -1455,7 +1310,6 @@ vint16mf4_t test_vlmul_trunc_v_i16m2_i16mf4(vint16m2_t op1) {
   return vlmul_trunc_v_i16m2_i16mf4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m2_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.experimental.vector.extract.nxv2i16.nxv8i16(<vscale x 8 x i16> [[OP1:%.*]], i64 0)
@@ -1465,7 +1319,6 @@ vint16mf2_t test_vlmul_trunc_v_i16m2_i16mf2(vint16m2_t op1) {
   return vlmul_trunc_v_i16m2_i16mf2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m2_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv8i16(<vscale x 8 x i16> [[OP1:%.*]], i64 0)
@@ -1475,7 +1328,6 @@ vint16m1_t test_vlmul_trunc_v_i16m2_i16m1(vint16m2_t op1) {
   return vlmul_trunc_v_i16m2_i16m1(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.experimental.vector.extract.nxv1i16.nxv16i16(<vscale x 16 x i16> [[OP1:%.*]], i64 0)
@@ -1485,7 +1337,6 @@ vint16mf4_t test_vlmul_trunc_v_i16m4_i16mf4(vint16m4_t op1) {
   return vlmul_trunc_v_i16m4_i16mf4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.experimental.vector.extract.nxv2i16.nxv16i16(<vscale x 16 x i16> [[OP1:%.*]], i64 0)
@@ -1495,7 +1346,6 @@ vint16mf2_t test_vlmul_trunc_v_i16m4_i16mf2(vint16m4_t op1) {
   return vlmul_trunc_v_i16m4_i16mf2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv16i16(<vscale x 16 x i16> [[OP1:%.*]], i64 0)
@@ -1505,7 +1355,6 @@ vint16m1_t test_vlmul_trunc_v_i16m4_i16m1(vint16m4_t op1) {
   return vlmul_trunc_v_i16m4_i16m1(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.extract.nxv8i16.nxv16i16(<vscale x 16 x i16> [[OP1:%.*]], i64 0)
@@ -1515,7 +1364,6 @@ vint16m2_t test_vlmul_trunc_v_i16m4_i16m2(vint16m4_t op1) {
   return vlmul_trunc_v_i16m4_i16m2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.experimental.vector.extract.nxv1i16.nxv32i16(<vscale x 32 x i16> [[OP1:%.*]], i64 0)
@@ -1525,7 +1373,6 @@ vint16mf4_t test_vlmul_trunc_v_i16m8_i16mf4(vint16m8_t op1) {
   return vlmul_trunc_v_i16m8_i16mf4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.experimental.vector.extract.nxv2i16.nxv32i16(<vscale x 32 x i16> [[OP1:%.*]], i64 0)
@@ -1535,7 +1382,6 @@ vint16mf2_t test_vlmul_trunc_v_i16m8_i16mf2(vint16m8_t op1) {
   return vlmul_trunc_v_i16m8_i16mf2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv32i16(<vscale x 32 x i16> [[OP1:%.*]], i64 0)
@@ -1545,7 +1391,6 @@ vint16m1_t test_vlmul_trunc_v_i16m8_i16m1(vint16m8_t op1) {
   return vlmul_trunc_v_i16m8_i16m1(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[OP1:%.*]], i64 0)
@@ -1555,7 +1400,6 @@ vint16m2_t test_vlmul_trunc_v_i16m8_i16m2(vint16m8_t op1) {
   return vlmul_trunc_v_i16m8_i16m2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.extract.nxv16i16.nxv32i16(<vscale x 32 x i16> [[OP1:%.*]], i64 0)
@@ -1565,7 +1409,6 @@ vint16m4_t test_vlmul_trunc_v_i16m8_i16m4(vint16m8_t op1) {
   return vlmul_trunc_v_i16m8_i16m4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m1_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.experimental.vector.extract.nxv1i32.nxv2i32(<vscale x 2 x i32> [[OP1:%.*]], i64 0)
@@ -1575,7 +1418,6 @@ vint32mf2_t test_vlmul_trunc_v_i32m1_i32mf2(vint32m1_t op1) {
   return vlmul_trunc_v_i32m1_i32mf2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m2_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.experimental.vector.extract.nxv1i32.nxv4i32(<vscale x 4 x i32> [[OP1:%.*]], i64 0)
@@ -1585,7 +1427,6 @@ vint32mf2_t test_vlmul_trunc_v_i32m2_i32mf2(vint32m2_t op1) {
   return vlmul_trunc_v_i32m2_i32mf2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m2_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv4i32(<vscale x 4 x i32> [[OP1:%.*]], i64 0)
@@ -1595,7 +1436,6 @@ vint32m1_t test_vlmul_trunc_v_i32m2_i32m1(vint32m2_t op1) {
   return vlmul_trunc_v_i32m2_i32m1(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m4_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.experimental.vector.extract.nxv1i32.nxv8i32(<vscale x 8 x i32> [[OP1:%.*]], i64 0)
@@ -1605,7 +1445,6 @@ vint32mf2_t test_vlmul_trunc_v_i32m4_i32mf2(vint32m4_t op1) {
   return vlmul_trunc_v_i32m4_i32mf2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m4_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> [[OP1:%.*]], i64 0)
@@ -1615,7 +1454,6 @@ vint32m1_t test_vlmul_trunc_v_i32m4_i32m1(vint32m4_t op1) {
   return vlmul_trunc_v_i32m4_i32m1(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m4_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> [[OP1:%.*]], i64 0)
@@ -1625,7 +1463,6 @@ vint32m2_t test_vlmul_trunc_v_i32m4_i32m2(vint32m4_t op1) {
   return vlmul_trunc_v_i32m4_i32m2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.experimental.vector.extract.nxv1i32.nxv16i32(<vscale x 16 x i32> [[OP1:%.*]], i64 0)
@@ -1635,7 +1472,6 @@ vint32mf2_t test_vlmul_trunc_v_i32m8_i32mf2(vint32m8_t op1) {
   return vlmul_trunc_v_i32m8_i32mf2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> [[OP1:%.*]], i64 0)
@@ -1645,7 +1481,6 @@ vint32m1_t test_vlmul_trunc_v_i32m8_i32m1(vint32m8_t op1) {
   return vlmul_trunc_v_i32m8_i32m1(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> [[OP1:%.*]], i64 0)
@@ -1655,7 +1490,6 @@ vint32m2_t test_vlmul_trunc_v_i32m8_i32m2(vint32m8_t op1) {
   return vlmul_trunc_v_i32m8_i32m2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.extract.nxv8i32.nxv16i32(<vscale x 16 x i32> [[OP1:%.*]], i64 0)
@@ -1665,7 +1499,6 @@ vint32m4_t test_vlmul_trunc_v_i32m8_i32m4(vint32m8_t op1) {
   return vlmul_trunc_v_i32m8_i32m4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m2_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv2i64(<vscale x 2 x i64> [[OP1:%.*]], i64 0)
@@ -1675,7 +1508,6 @@ vint64m1_t test_vlmul_trunc_v_i64m2_i64m1(vint64m2_t op1) {
   return vlmul_trunc_v_i64m2_i64m1(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m4_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv4i64(<vscale x 4 x i64> [[OP1:%.*]], i64 0)
@@ -1685,7 +1517,6 @@ vint64m1_t test_vlmul_trunc_v_i64m4_i64m1(vint64m4_t op1) {
   return vlmul_trunc_v_i64m4_i64m1(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m4_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.extract.nxv2i64.nxv4i64(<vscale x 4 x i64> [[OP1:%.*]], i64 0)
@@ -1695,7 +1526,6 @@ vint64m2_t test_vlmul_trunc_v_i64m4_i64m2(vint64m4_t op1) {
   return vlmul_trunc_v_i64m4_i64m2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m8_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv8i64(<vscale x 8 x i64> [[OP1:%.*]], i64 0)
@@ -1705,7 +1535,6 @@ vint64m1_t test_vlmul_trunc_v_i64m8_i64m1(vint64m8_t op1) {
   return vlmul_trunc_v_i64m8_i64m1(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m8_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.extract.nxv2i64.nxv8i64(<vscale x 8 x i64> [[OP1:%.*]], i64 0)
@@ -1715,7 +1544,6 @@ vint64m2_t test_vlmul_trunc_v_i64m8_i64m2(vint64m8_t op1) {
   return vlmul_trunc_v_i64m8_i64m2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m8_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.extract.nxv4i64.nxv8i64(<vscale x 8 x i64> [[OP1:%.*]], i64 0)
@@ -1725,7 +1553,6 @@ vint64m4_t test_vlmul_trunc_v_i64m8_i64m4(vint64m8_t op1) {
   return vlmul_trunc_v_i64m8_i64m4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8mf4_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.experimental.vector.extract.nxv1i8.nxv2i8(<vscale x 2 x i8> [[OP1:%.*]], i64 0)
@@ -1735,7 +1562,6 @@ vuint8mf8_t test_vlmul_trunc_v_u8mf4_u8mf8(vuint8mf4_t op1) {
   return vlmul_trunc_v_u8mf4_u8mf8(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8mf2_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.experimental.vector.extract.nxv1i8.nxv4i8(<vscale x 4 x i8> [[OP1:%.*]], i64 0)
@@ -1745,7 +1571,6 @@ vuint8mf8_t test_vlmul_trunc_v_u8mf2_u8mf8(vuint8mf2_t op1) {
   return vlmul_trunc_v_u8mf2_u8mf8(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8mf2_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.extract.nxv2i8.nxv4i8(<vscale x 4 x i8> [[OP1:%.*]], i64 0)
@@ -1755,7 +1580,6 @@ vuint8mf4_t test_vlmul_trunc_v_u8mf2_u8mf4(vuint8mf2_t op1) {
   return vlmul_trunc_v_u8mf2_u8mf4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m1_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.experimental.vector.extract.nxv1i8.nxv8i8(<vscale x 8 x i8> [[OP1:%.*]], i64 0)
@@ -1765,7 +1589,6 @@ vuint8mf8_t test_vlmul_trunc_v_u8m1_u8mf8(vuint8m1_t op1) {
   return vlmul_trunc_v_u8m1_u8mf8(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m1_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.extract.nxv2i8.nxv8i8(<vscale x 8 x i8> [[OP1:%.*]], i64 0)
@@ -1775,7 +1598,6 @@ vuint8mf4_t test_vlmul_trunc_v_u8m1_u8mf4(vuint8m1_t op1) {
   return vlmul_trunc_v_u8m1_u8mf4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m1_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.experimental.vector.extract.nxv4i8.nxv8i8(<vscale x 8 x i8> [[OP1:%.*]], i64 0)
@@ -1785,7 +1607,6 @@ vuint8mf2_t test_vlmul_trunc_v_u8m1_u8mf2(vuint8m1_t op1) {
   return vlmul_trunc_v_u8m1_u8mf2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.experimental.vector.extract.nxv1i8.nxv16i8(<vscale x 16 x i8> [[OP1:%.*]], i64 0)
@@ -1795,7 +1616,6 @@ vuint8mf8_t test_vlmul_trunc_v_u8m2_u8mf8(vuint8m2_t op1) {
   return vlmul_trunc_v_u8m2_u8mf8(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.extract.nxv2i8.nxv16i8(<vscale x 16 x i8> [[OP1:%.*]], i64 0)
@@ -1805,7 +1625,6 @@ vuint8mf4_t test_vlmul_trunc_v_u8m2_u8mf4(vuint8m2_t op1) {
   return vlmul_trunc_v_u8m2_u8mf4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.experimental.vector.extract.nxv4i8.nxv16i8(<vscale x 16 x i8> [[OP1:%.*]], i64 0)
@@ -1815,7 +1634,6 @@ vuint8mf2_t test_vlmul_trunc_v_u8m2_u8mf2(vuint8m2_t op1) {
   return vlmul_trunc_v_u8m2_u8mf2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv16i8(<vscale x 16 x i8> [[OP1:%.*]], i64 0)
@@ -1825,7 +1643,6 @@ vuint8m1_t test_vlmul_trunc_v_u8m2_u8m1(vuint8m2_t op1) {
   return vlmul_trunc_v_u8m2_u8m1(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.experimental.vector.extract.nxv1i8.nxv32i8(<vscale x 32 x i8> [[OP1:%.*]], i64 0)
@@ -1835,7 +1652,6 @@ vuint8mf8_t test_vlmul_trunc_v_u8m4_u8mf8(vuint8m4_t op1) {
   return vlmul_trunc_v_u8m4_u8mf8(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.extract.nxv2i8.nxv32i8(<vscale x 32 x i8> [[OP1:%.*]], i64 0)
@@ -1845,7 +1661,6 @@ vuint8mf4_t test_vlmul_trunc_v_u8m4_u8mf4(vuint8m4_t op1) {
   return vlmul_trunc_v_u8m4_u8mf4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.experimental.vector.extract.nxv4i8.nxv32i8(<vscale x 32 x i8> [[OP1:%.*]], i64 0)
@@ -1855,7 +1670,6 @@ vuint8mf2_t test_vlmul_trunc_v_u8m4_u8mf2(vuint8m4_t op1) {
   return vlmul_trunc_v_u8m4_u8mf2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv32i8(<vscale x 32 x i8> [[OP1:%.*]], i64 0)
@@ -1865,7 +1679,6 @@ vuint8m1_t test_vlmul_trunc_v_u8m4_u8m1(vuint8m4_t op1) {
   return vlmul_trunc_v_u8m4_u8m1(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.extract.nxv16i8.nxv32i8(<vscale x 32 x i8> [[OP1:%.*]], i64 0)
@@ -1875,7 +1688,6 @@ vuint8m2_t test_vlmul_trunc_v_u8m4_u8m2(vuint8m4_t op1) {
   return vlmul_trunc_v_u8m4_u8m2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.experimental.vector.extract.nxv1i8.nxv64i8(<vscale x 64 x i8> [[OP1:%.*]], i64 0)
@@ -1885,7 +1697,6 @@ vuint8mf8_t test_vlmul_trunc_v_u8m8_u8mf8(vuint8m8_t op1) {
   return vlmul_trunc_v_u8m8_u8mf8(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.extract.nxv2i8.nxv64i8(<vscale x 64 x i8> [[OP1:%.*]], i64 0)
@@ -1895,7 +1706,6 @@ vuint8mf4_t test_vlmul_trunc_v_u8m8_u8mf4(vuint8m8_t op1) {
   return vlmul_trunc_v_u8m8_u8mf4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.experimental.vector.extract.nxv4i8.nxv64i8(<vscale x 64 x i8> [[OP1:%.*]], i64 0)
@@ -1905,7 +1715,6 @@ vuint8mf2_t test_vlmul_trunc_v_u8m8_u8mf2(vuint8m8_t op1) {
   return vlmul_trunc_v_u8m8_u8mf2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv64i8(<vscale x 64 x i8> [[OP1:%.*]], i64 0)
@@ -1915,7 +1724,6 @@ vuint8m1_t test_vlmul_trunc_v_u8m8_u8m1(vuint8m8_t op1) {
   return vlmul_trunc_v_u8m8_u8m1(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[OP1:%.*]], i64 0)
@@ -1925,7 +1733,6 @@ vuint8m2_t test_vlmul_trunc_v_u8m8_u8m2(vuint8m8_t op1) {
   return vlmul_trunc_v_u8m8_u8m2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.extract.nxv32i8.nxv64i8(<vscale x 64 x i8> [[OP1:%.*]], i64 0)
@@ -1935,7 +1742,6 @@ vuint8m4_t test_vlmul_trunc_v_u8m8_u8m4(vuint8m8_t op1) {
   return vlmul_trunc_v_u8m8_u8m4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16mf2_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.experimental.vector.extract.nxv1i16.nxv2i16(<vscale x 2 x i16> [[OP1:%.*]], i64 0)
@@ -1945,7 +1751,6 @@ vuint16mf4_t test_vlmul_trunc_v_u16mf2_u16mf4(vuint16mf2_t op1) {
   return vlmul_trunc_v_u16mf2_u16mf4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m1_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.experimental.vector.extract.nxv1i16.nxv4i16(<vscale x 4 x i16> [[OP1:%.*]], i64 0)
@@ -1955,7 +1760,6 @@ vuint16mf4_t test_vlmul_trunc_v_u16m1_u16mf4(vuint16m1_t op1) {
   return vlmul_trunc_v_u16m1_u16mf4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m1_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.experimental.vector.extract.nxv2i16.nxv4i16(<vscale x 4 x i16> [[OP1:%.*]], i64 0)
@@ -1965,7 +1769,6 @@ vuint16mf2_t test_vlmul_trunc_v_u16m1_u16mf2(vuint16m1_t op1) {
   return vlmul_trunc_v_u16m1_u16mf2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m2_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.experimental.vector.extract.nxv1i16.nxv8i16(<vscale x 8 x i16> [[OP1:%.*]], i64 0)
@@ -1975,7 +1778,6 @@ vuint16mf4_t test_vlmul_trunc_v_u16m2_u16mf4(vuint16m2_t op1) {
   return vlmul_trunc_v_u16m2_u16mf4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m2_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.experimental.vector.extract.nxv2i16.nxv8i16(<vscale x 8 x i16> [[OP1:%.*]], i64 0)
@@ -1985,7 +1787,6 @@ vuint16mf2_t test_vlmul_trunc_v_u16m2_u16mf2(vuint16m2_t op1) {
   return vlmul_trunc_v_u16m2_u16mf2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m2_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv8i16(<vscale x 8 x i16> [[OP1:%.*]], i64 0)
@@ -1995,7 +1796,6 @@ vuint16m1_t test_vlmul_trunc_v_u16m2_u16m1(vuint16m2_t op1) {
   return vlmul_trunc_v_u16m2_u16m1(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.experimental.vector.extract.nxv1i16.nxv16i16(<vscale x 16 x i16> [[OP1:%.*]], i64 0)
@@ -2005,7 +1805,6 @@ vuint16mf4_t test_vlmul_trunc_v_u16m4_u16mf4(vuint16m4_t op1) {
   return vlmul_trunc_v_u16m4_u16mf4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.experimental.vector.extract.nxv2i16.nxv16i16(<vscale x 16 x i16> [[OP1:%.*]], i64 0)
@@ -2015,7 +1814,6 @@ vuint16mf2_t test_vlmul_trunc_v_u16m4_u16mf2(vuint16m4_t op1) {
   return vlmul_trunc_v_u16m4_u16mf2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv16i16(<vscale x 16 x i16> [[OP1:%.*]], i64 0)
@@ -2025,7 +1823,6 @@ vuint16m1_t test_vlmul_trunc_v_u16m4_u16m1(vuint16m4_t op1) {
   return vlmul_trunc_v_u16m4_u16m1(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.extract.nxv8i16.nxv16i16(<vscale x 16 x i16> [[OP1:%.*]], i64 0)
@@ -2035,7 +1832,6 @@ vuint16m2_t test_vlmul_trunc_v_u16m4_u16m2(vuint16m4_t op1) {
   return vlmul_trunc_v_u16m4_u16m2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.experimental.vector.extract.nxv1i16.nxv32i16(<vscale x 32 x i16> [[OP1:%.*]], i64 0)
@@ -2045,7 +1841,6 @@ vuint16mf4_t test_vlmul_trunc_v_u16m8_u16mf4(vuint16m8_t op1) {
   return vlmul_trunc_v_u16m8_u16mf4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.experimental.vector.extract.nxv2i16.nxv32i16(<vscale x 32 x i16> [[OP1:%.*]], i64 0)
@@ -2055,7 +1850,6 @@ vuint16mf2_t test_vlmul_trunc_v_u16m8_u16mf2(vuint16m8_t op1) {
   return vlmul_trunc_v_u16m8_u16mf2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv32i16(<vscale x 32 x i16> [[OP1:%.*]], i64 0)
@@ -2065,7 +1859,6 @@ vuint16m1_t test_vlmul_trunc_v_u16m8_u16m1(vuint16m8_t op1) {
   return vlmul_trunc_v_u16m8_u16m1(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[OP1:%.*]], i64 0)
@@ -2075,7 +1868,6 @@ vuint16m2_t test_vlmul_trunc_v_u16m8_u16m2(vuint16m8_t op1) {
   return vlmul_trunc_v_u16m8_u16m2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.extract.nxv16i16.nxv32i16(<vscale x 32 x i16> [[OP1:%.*]], i64 0)
@@ -2085,7 +1877,6 @@ vuint16m4_t test_vlmul_trunc_v_u16m8_u16m4(vuint16m8_t op1) {
   return vlmul_trunc_v_u16m8_u16m4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m1_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.experimental.vector.extract.nxv1i32.nxv2i32(<vscale x 2 x i32> [[OP1:%.*]], i64 0)
@@ -2095,7 +1886,6 @@ vuint32mf2_t test_vlmul_trunc_v_u32m1_u32mf2(vuint32m1_t op1) {
   return vlmul_trunc_v_u32m1_u32mf2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m2_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.experimental.vector.extract.nxv1i32.nxv4i32(<vscale x 4 x i32> [[OP1:%.*]], i64 0)
@@ -2105,7 +1895,6 @@ vuint32mf2_t test_vlmul_trunc_v_u32m2_u32mf2(vuint32m2_t op1) {
   return vlmul_trunc_v_u32m2_u32mf2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m2_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv4i32(<vscale x 4 x i32> [[OP1:%.*]], i64 0)
@@ -2115,7 +1904,6 @@ vuint32m1_t test_vlmul_trunc_v_u32m2_u32m1(vuint32m2_t op1) {
   return vlmul_trunc_v_u32m2_u32m1(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m4_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.experimental.vector.extract.nxv1i32.nxv8i32(<vscale x 8 x i32> [[OP1:%.*]], i64 0)
@@ -2125,7 +1913,6 @@ vuint32mf2_t test_vlmul_trunc_v_u32m4_u32mf2(vuint32m4_t op1) {
   return vlmul_trunc_v_u32m4_u32mf2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m4_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> [[OP1:%.*]], i64 0)
@@ -2135,7 +1922,6 @@ vuint32m1_t test_vlmul_trunc_v_u32m4_u32m1(vuint32m4_t op1) {
   return vlmul_trunc_v_u32m4_u32m1(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m4_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> [[OP1:%.*]], i64 0)
@@ -2145,7 +1931,6 @@ vuint32m2_t test_vlmul_trunc_v_u32m4_u32m2(vuint32m4_t op1) {
   return vlmul_trunc_v_u32m4_u32m2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.experimental.vector.extract.nxv1i32.nxv16i32(<vscale x 16 x i32> [[OP1:%.*]], i64 0)
@@ -2155,7 +1940,6 @@ vuint32mf2_t test_vlmul_trunc_v_u32m8_u32mf2(vuint32m8_t op1) {
   return vlmul_trunc_v_u32m8_u32mf2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> [[OP1:%.*]], i64 0)
@@ -2165,7 +1949,6 @@ vuint32m1_t test_vlmul_trunc_v_u32m8_u32m1(vuint32m8_t op1) {
   return vlmul_trunc_v_u32m8_u32m1(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> [[OP1:%.*]], i64 0)
@@ -2175,7 +1958,6 @@ vuint32m2_t test_vlmul_trunc_v_u32m8_u32m2(vuint32m8_t op1) {
   return vlmul_trunc_v_u32m8_u32m2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.extract.nxv8i32.nxv16i32(<vscale x 16 x i32> [[OP1:%.*]], i64 0)
@@ -2185,7 +1967,6 @@ vuint32m4_t test_vlmul_trunc_v_u32m8_u32m4(vuint32m8_t op1) {
   return vlmul_trunc_v_u32m8_u32m4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m2_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv2i64(<vscale x 2 x i64> [[OP1:%.*]], i64 0)
@@ -2195,7 +1976,6 @@ vuint64m1_t test_vlmul_trunc_v_u64m2_u64m1(vuint64m2_t op1) {
   return vlmul_trunc_v_u64m2_u64m1(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m4_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv4i64(<vscale x 4 x i64> [[OP1:%.*]], i64 0)
@@ -2205,7 +1985,6 @@ vuint64m1_t test_vlmul_trunc_v_u64m4_u64m1(vuint64m4_t op1) {
   return vlmul_trunc_v_u64m4_u64m1(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m4_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.extract.nxv2i64.nxv4i64(<vscale x 4 x i64> [[OP1:%.*]], i64 0)
@@ -2215,7 +1994,6 @@ vuint64m2_t test_vlmul_trunc_v_u64m4_u64m2(vuint64m4_t op1) {
   return vlmul_trunc_v_u64m4_u64m2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m8_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv8i64(<vscale x 8 x i64> [[OP1:%.*]], i64 0)
@@ -2225,7 +2003,6 @@ vuint64m1_t test_vlmul_trunc_v_u64m8_u64m1(vuint64m8_t op1) {
   return vlmul_trunc_v_u64m8_u64m1(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m8_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.extract.nxv2i64.nxv8i64(<vscale x 8 x i64> [[OP1:%.*]], i64 0)
@@ -2235,7 +2012,6 @@ vuint64m2_t test_vlmul_trunc_v_u64m8_u64m2(vuint64m8_t op1) {
   return vlmul_trunc_v_u64m8_u64m2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m8_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.extract.nxv4i64.nxv8i64(<vscale x 8 x i64> [[OP1:%.*]], i64 0)
@@ -2245,7 +2021,6 @@ vuint64m4_t test_vlmul_trunc_v_u64m8_u64m4(vuint64m8_t op1) {
   return vlmul_trunc_v_u64m8_u64m4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m1_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.experimental.vector.extract.nxv1f32.nxv2f32(<vscale x 2 x float> [[OP1:%.*]], i64 0)
@@ -2255,7 +2030,6 @@ vfloat32mf2_t test_vlmul_trunc_v_f32m1_f32mf2(vfloat32m1_t op1) {
   return vlmul_trunc_v_f32m1_f32mf2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m2_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.experimental.vector.extract.nxv1f32.nxv4f32(<vscale x 4 x float> [[OP1:%.*]], i64 0)
@@ -2265,7 +2039,6 @@ vfloat32mf2_t test_vlmul_trunc_v_f32m2_f32mf2(vfloat32m2_t op1) {
   return vlmul_trunc_v_f32m2_f32mf2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m2_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.experimental.vector.extract.nxv2f32.nxv4f32(<vscale x 4 x float> [[OP1:%.*]], i64 0)
@@ -2275,7 +2048,6 @@ vfloat32m1_t test_vlmul_trunc_v_f32m2_f32m1(vfloat32m2_t op1) {
   return vlmul_trunc_v_f32m2_f32m1(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m4_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.experimental.vector.extract.nxv1f32.nxv8f32(<vscale x 8 x float> [[OP1:%.*]], i64 0)
@@ -2285,7 +2057,6 @@ vfloat32mf2_t test_vlmul_trunc_v_f32m4_f32mf2(vfloat32m4_t op1) {
   return vlmul_trunc_v_f32m4_f32mf2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m4_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.experimental.vector.extract.nxv2f32.nxv8f32(<vscale x 8 x float> [[OP1:%.*]], i64 0)
@@ -2295,7 +2066,6 @@ vfloat32m1_t test_vlmul_trunc_v_f32m4_f32m1(vfloat32m4_t op1) {
   return vlmul_trunc_v_f32m4_f32m1(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m4_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.experimental.vector.extract.nxv4f32.nxv8f32(<vscale x 8 x float> [[OP1:%.*]], i64 0)
@@ -2305,7 +2075,6 @@ vfloat32m2_t test_vlmul_trunc_v_f32m4_f32m2(vfloat32m4_t op1) {
   return vlmul_trunc_v_f32m4_f32m2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.experimental.vector.extract.nxv1f32.nxv16f32(<vscale x 16 x float> [[OP1:%.*]], i64 0)
@@ -2315,7 +2084,6 @@ vfloat32mf2_t test_vlmul_trunc_v_f32m8_f32mf2(vfloat32m8_t op1) {
   return vlmul_trunc_v_f32m8_f32mf2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.experimental.vector.extract.nxv2f32.nxv16f32(<vscale x 16 x float> [[OP1:%.*]], i64 0)
@@ -2325,7 +2093,6 @@ vfloat32m1_t test_vlmul_trunc_v_f32m8_f32m1(vfloat32m8_t op1) {
   return vlmul_trunc_v_f32m8_f32m1(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.experimental.vector.extract.nxv4f32.nxv16f32(<vscale x 16 x float> [[OP1:%.*]], i64 0)
@@ -2335,7 +2102,6 @@ vfloat32m2_t test_vlmul_trunc_v_f32m8_f32m2(vfloat32m8_t op1) {
   return vlmul_trunc_v_f32m8_f32m2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.experimental.vector.extract.nxv8f32.nxv16f32(<vscale x 16 x float> [[OP1:%.*]], i64 0)
@@ -2345,7 +2111,6 @@ vfloat32m4_t test_vlmul_trunc_v_f32m8_f32m4(vfloat32m8_t op1) {
   return vlmul_trunc_v_f32m8_f32m4(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m2_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.experimental.vector.extract.nxv1f64.nxv2f64(<vscale x 2 x double> [[OP1:%.*]], i64 0)
@@ -2355,7 +2120,6 @@ vfloat64m1_t test_vlmul_trunc_v_f64m2_f64m1(vfloat64m2_t op1) {
   return vlmul_trunc_v_f64m2_f64m1(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m4_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.experimental.vector.extract.nxv1f64.nxv4f64(<vscale x 4 x double> [[OP1:%.*]], i64 0)
@@ -2365,7 +2129,6 @@ vfloat64m1_t test_vlmul_trunc_v_f64m4_f64m1(vfloat64m4_t op1) {
   return vlmul_trunc_v_f64m4_f64m1(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m4_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.extract.nxv2f64.nxv4f64(<vscale x 4 x double> [[OP1:%.*]], i64 0)
@@ -2375,7 +2138,6 @@ vfloat64m2_t test_vlmul_trunc_v_f64m4_f64m2(vfloat64m4_t op1) {
   return vlmul_trunc_v_f64m4_f64m2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m8_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.experimental.vector.extract.nxv1f64.nxv8f64(<vscale x 8 x double> [[OP1:%.*]], i64 0)
@@ -2385,7 +2147,6 @@ vfloat64m1_t test_vlmul_trunc_v_f64m8_f64m1(vfloat64m8_t op1) {
   return vlmul_trunc_v_f64m8_f64m1(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m8_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.extract.nxv2f64.nxv8f64(<vscale x 8 x double> [[OP1:%.*]], i64 0)
@@ -2395,7 +2156,6 @@ vfloat64m2_t test_vlmul_trunc_v_f64m8_f64m2(vfloat64m8_t op1) {
   return vlmul_trunc_v_f64m8_f64m2(op1);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m8_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.experimental.vector.extract.nxv4f64.nxv8f64(<vscale x 8 x double> [[OP1:%.*]], i64 0)

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxei.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxei.c
index 050cb7ddd5496..2948c337650ab 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxei.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxei.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -16,7 +15,6 @@ vint8mf8_t testuxei8_v_i8mf8(const int8_t *base, vuint8mf8_t bindex, size_t vl)
   return vloxei8_v_i8mf8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -27,7 +25,6 @@ vint8mf4_t testuxei8_v_i8mf4(const int8_t *base, vuint8mf4_t bindex, size_t vl)
   return vloxei8_v_i8mf4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -38,7 +35,6 @@ vint8mf2_t testuxei8_v_i8mf2(const int8_t *base, vuint8mf2_t bindex, size_t vl)
   return vloxei8_v_i8mf2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -49,7 +45,6 @@ vint8m1_t testuxei8_v_i8m1(const int8_t *base, vuint8m1_t bindex, size_t vl) {
   return vloxei8_v_i8m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -60,7 +55,6 @@ vint8m2_t testuxei8_v_i8m2(const int8_t *base, vuint8m2_t bindex, size_t vl) {
   return vloxei8_v_i8m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
@@ -71,7 +65,6 @@ vint8m4_t testuxei8_v_i8m4(const int8_t *base, vuint8m4_t bindex, size_t vl) {
   return vloxei8_v_i8m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
@@ -82,7 +75,6 @@ vint8m8_t testuxei8_v_i8m8(const int8_t *base, vuint8m8_t bindex, size_t vl) {
   return vloxei8_v_i8m8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -93,7 +85,6 @@ vint8mf8_t testuxei16_v_i8mf8(const int8_t *base, vuint16mf4_t bindex, size_t vl
   return vloxei16_v_i8mf8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -104,7 +95,6 @@ vint8mf4_t testuxei16_v_i8mf4(const int8_t *base, vuint16mf2_t bindex, size_t vl
   return vloxei16_v_i8mf4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -115,7 +105,6 @@ vint8mf2_t testuxei16_v_i8mf2(const int8_t *base, vuint16m1_t bindex, size_t vl)
   return vloxei16_v_i8mf2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -126,7 +115,6 @@ vint8m1_t testuxei16_v_i8m1(const int8_t *base, vuint16m2_t bindex, size_t vl) {
   return vloxei16_v_i8m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -137,7 +125,6 @@ vint8m2_t testuxei16_v_i8m2(const int8_t *base, vuint16m4_t bindex, size_t vl) {
   return vloxei16_v_i8m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
@@ -148,7 +135,6 @@ vint8m4_t testuxei16_v_i8m4(const int8_t *base, vuint16m8_t bindex, size_t vl) {
   return vloxei16_v_i8m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -159,7 +145,6 @@ vint8mf8_t testuxei32_v_i8mf8(const int8_t *base, vuint32mf2_t bindex, size_t vl
   return vloxei32_v_i8mf8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -170,7 +155,6 @@ vint8mf4_t testuxei32_v_i8mf4(const int8_t *base, vuint32m1_t bindex, size_t vl)
   return vloxei32_v_i8mf4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -181,7 +165,6 @@ vint8mf2_t testuxei32_v_i8mf2(const int8_t *base, vuint32m2_t bindex, size_t vl)
   return vloxei32_v_i8mf2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -192,7 +175,6 @@ vint8m1_t testuxei32_v_i8m1(const int8_t *base, vuint32m4_t bindex, size_t vl) {
   return vloxei32_v_i8m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -203,7 +185,6 @@ vint8m2_t testuxei32_v_i8m2(const int8_t *base, vuint32m8_t bindex, size_t vl) {
   return vloxei32_v_i8m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -214,7 +195,6 @@ vint8mf8_t testuxei64_v_i8mf8(const int8_t *base, vuint64m1_t bindex, size_t vl)
   return vloxei64_v_i8mf8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -225,7 +205,6 @@ vint8mf4_t testuxei64_v_i8mf4(const int8_t *base, vuint64m2_t bindex, size_t vl)
   return vloxei64_v_i8mf4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -236,7 +215,6 @@ vint8mf2_t testuxei64_v_i8mf2(const int8_t *base, vuint64m4_t bindex, size_t vl)
   return vloxei64_v_i8mf2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -247,7 +225,6 @@ vint8m1_t testuxei64_v_i8m1(const int8_t *base, vuint64m8_t bindex, size_t vl) {
   return vloxei64_v_i8m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -258,7 +235,6 @@ vint16mf4_t testuxei8_v_i16mf4(const int16_t *base, vuint8mf8_t bindex, size_t v
   return vloxei8_v_i16mf4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -269,7 +245,6 @@ vint16mf2_t testuxei8_v_i16mf2(const int16_t *base, vuint8mf4_t bindex, size_t v
   return vloxei8_v_i16mf2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -280,7 +255,6 @@ vint16m1_t testuxei8_v_i16m1(const int16_t *base, vuint8mf2_t bindex, size_t vl)
   return vloxei8_v_i16m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -291,7 +265,6 @@ vint16m2_t testuxei8_v_i16m2(const int16_t *base, vuint8m1_t bindex, size_t vl)
   return vloxei8_v_i16m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -302,7 +275,6 @@ vint16m4_t testuxei8_v_i16m4(const int16_t *base, vuint8m2_t bindex, size_t vl)
   return vloxei8_v_i16m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
@@ -313,7 +285,6 @@ vint16m8_t testuxei8_v_i16m8(const int16_t *base, vuint8m4_t bindex, size_t vl)
   return vloxei8_v_i16m8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -324,7 +295,6 @@ vint16mf4_t testuxei16_v_i16mf4(const int16_t *base, vuint16mf4_t bindex, size_t
   return vloxei16_v_i16mf4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -335,7 +305,6 @@ vint16mf2_t testuxei16_v_i16mf2(const int16_t *base, vuint16mf2_t bindex, size_t
   return vloxei16_v_i16mf2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -346,7 +315,6 @@ vint16m1_t testuxei16_v_i16m1(const int16_t *base, vuint16m1_t bindex, size_t vl
   return vloxei16_v_i16m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -357,7 +325,6 @@ vint16m2_t testuxei16_v_i16m2(const int16_t *base, vuint16m2_t bindex, size_t vl
   return vloxei16_v_i16m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -368,7 +335,6 @@ vint16m4_t testuxei16_v_i16m4(const int16_t *base, vuint16m4_t bindex, size_t vl
   return vloxei16_v_i16m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
@@ -379,7 +345,6 @@ vint16m8_t testuxei16_v_i16m8(const int16_t *base, vuint16m8_t bindex, size_t vl
   return vloxei16_v_i16m8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -390,7 +355,6 @@ vint16mf4_t testuxei32_v_i16mf4(const int16_t *base, vuint32mf2_t bindex, size_t
   return vloxei32_v_i16mf4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -401,7 +365,6 @@ vint16mf2_t testuxei32_v_i16mf2(const int16_t *base, vuint32m1_t bindex, size_t
   return vloxei32_v_i16mf2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -412,7 +375,6 @@ vint16m1_t testuxei32_v_i16m1(const int16_t *base, vuint32m2_t bindex, size_t vl
   return vloxei32_v_i16m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -423,7 +385,6 @@ vint16m2_t testuxei32_v_i16m2(const int16_t *base, vuint32m4_t bindex, size_t vl
   return vloxei32_v_i16m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -434,7 +395,6 @@ vint16m4_t testuxei32_v_i16m4(const int16_t *base, vuint32m8_t bindex, size_t vl
   return vloxei32_v_i16m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -445,7 +405,6 @@ vint16mf4_t testuxei64_v_i16mf4(const int16_t *base, vuint64m1_t bindex, size_t
   return vloxei64_v_i16mf4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -456,7 +415,6 @@ vint16mf2_t testuxei64_v_i16mf2(const int16_t *base, vuint64m2_t bindex, size_t
   return vloxei64_v_i16mf2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -467,7 +425,6 @@ vint16m1_t testuxei64_v_i16m1(const int16_t *base, vuint64m4_t bindex, size_t vl
   return vloxei64_v_i16m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -478,7 +435,6 @@ vint16m2_t testuxei64_v_i16m2(const int16_t *base, vuint64m8_t bindex, size_t vl
   return vloxei64_v_i16m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -489,7 +445,6 @@ vint32mf2_t testuxei8_v_i32mf2(const int32_t *base, vuint8mf8_t bindex, size_t v
   return vloxei8_v_i32mf2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -500,7 +455,6 @@ vint32m1_t testuxei8_v_i32m1(const int32_t *base, vuint8mf4_t bindex, size_t vl)
   return vloxei8_v_i32m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -511,7 +465,6 @@ vint32m2_t testuxei8_v_i32m2(const int32_t *base, vuint8mf2_t bindex, size_t vl)
   return vloxei8_v_i32m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -522,7 +475,6 @@ vint32m4_t testuxei8_v_i32m4(const int32_t *base, vuint8m1_t bindex, size_t vl)
   return vloxei8_v_i32m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -533,7 +485,6 @@ vint32m8_t testuxei8_v_i32m8(const int32_t *base, vuint8m2_t bindex, size_t vl)
   return vloxei8_v_i32m8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -544,7 +495,6 @@ vint32mf2_t testuxei16_v_i32mf2(const int32_t *base, vuint16mf4_t bindex, size_t
   return vloxei16_v_i32mf2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -555,7 +505,6 @@ vint32m1_t testuxei16_v_i32m1(const int32_t *base, vuint16mf2_t bindex, size_t v
   return vloxei16_v_i32m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -566,7 +515,6 @@ vint32m2_t testuxei16_v_i32m2(const int32_t *base, vuint16m1_t bindex, size_t vl
   return vloxei16_v_i32m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -577,7 +525,6 @@ vint32m4_t testuxei16_v_i32m4(const int32_t *base, vuint16m2_t bindex, size_t vl
   return vloxei16_v_i32m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -588,7 +535,6 @@ vint32m8_t testuxei16_v_i32m8(const int32_t *base, vuint16m4_t bindex, size_t vl
   return vloxei16_v_i32m8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -599,7 +545,6 @@ vint32mf2_t testuxei32_v_i32mf2(const int32_t *base, vuint32mf2_t bindex, size_t
   return vloxei32_v_i32mf2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -610,7 +555,6 @@ vint32m1_t testuxei32_v_i32m1(const int32_t *base, vuint32m1_t bindex, size_t vl
   return vloxei32_v_i32m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -621,7 +565,6 @@ vint32m2_t testuxei32_v_i32m2(const int32_t *base, vuint32m2_t bindex, size_t vl
   return vloxei32_v_i32m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -632,7 +575,6 @@ vint32m4_t testuxei32_v_i32m4(const int32_t *base, vuint32m4_t bindex, size_t vl
   return vloxei32_v_i32m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -643,7 +585,6 @@ vint32m8_t testuxei32_v_i32m8(const int32_t *base, vuint32m8_t bindex, size_t vl
   return vloxei32_v_i32m8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -654,7 +595,6 @@ vint32mf2_t testuxei64_v_i32mf2(const int32_t *base, vuint64m1_t bindex, size_t
   return vloxei64_v_i32mf2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -665,7 +605,6 @@ vint32m1_t testuxei64_v_i32m1(const int32_t *base, vuint64m2_t bindex, size_t vl
   return vloxei64_v_i32m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -676,7 +615,6 @@ vint32m2_t testuxei64_v_i32m2(const int32_t *base, vuint64m4_t bindex, size_t vl
   return vloxei64_v_i32m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -687,7 +625,6 @@ vint32m4_t testuxei64_v_i32m4(const int32_t *base, vuint64m8_t bindex, size_t vl
   return vloxei64_v_i32m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -698,7 +635,6 @@ vint64m1_t testuxei8_v_i64m1(const int64_t *base, vuint8mf8_t bindex, size_t vl)
   return vloxei8_v_i64m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -709,7 +645,6 @@ vint64m2_t testuxei8_v_i64m2(const int64_t *base, vuint8mf4_t bindex, size_t vl)
   return vloxei8_v_i64m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -720,7 +655,6 @@ vint64m4_t testuxei8_v_i64m4(const int64_t *base, vuint8mf2_t bindex, size_t vl)
   return vloxei8_v_i64m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -731,7 +665,6 @@ vint64m8_t testuxei8_v_i64m8(const int64_t *base, vuint8m1_t bindex, size_t vl)
   return vloxei8_v_i64m8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -742,7 +675,6 @@ vint64m1_t testuxei16_v_i64m1(const int64_t *base, vuint16mf4_t bindex, size_t v
   return vloxei16_v_i64m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -753,7 +685,6 @@ vint64m2_t testuxei16_v_i64m2(const int64_t *base, vuint16mf2_t bindex, size_t v
   return vloxei16_v_i64m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -764,7 +695,6 @@ vint64m4_t testuxei16_v_i64m4(const int64_t *base, vuint16m1_t bindex, size_t vl
   return vloxei16_v_i64m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -775,7 +705,6 @@ vint64m8_t testuxei16_v_i64m8(const int64_t *base, vuint16m2_t bindex, size_t vl
   return vloxei16_v_i64m8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -786,7 +715,6 @@ vint64m1_t testuxei32_v_i64m1(const int64_t *base, vuint32mf2_t bindex, size_t v
   return vloxei32_v_i64m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -797,7 +725,6 @@ vint64m2_t testuxei32_v_i64m2(const int64_t *base, vuint32m1_t bindex, size_t vl
   return vloxei32_v_i64m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -808,7 +735,6 @@ vint64m4_t testuxei32_v_i64m4(const int64_t *base, vuint32m2_t bindex, size_t vl
   return vloxei32_v_i64m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -819,7 +745,6 @@ vint64m8_t testuxei32_v_i64m8(const int64_t *base, vuint32m4_t bindex, size_t vl
   return vloxei32_v_i64m8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -830,7 +755,6 @@ vint64m1_t testuxei64_v_i64m1(const int64_t *base, vuint64m1_t bindex, size_t vl
   return vloxei64_v_i64m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -841,7 +765,6 @@ vint64m2_t testuxei64_v_i64m2(const int64_t *base, vuint64m2_t bindex, size_t vl
   return vloxei64_v_i64m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -852,7 +775,6 @@ vint64m4_t testuxei64_v_i64m4(const int64_t *base, vuint64m4_t bindex, size_t vl
   return vloxei64_v_i64m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -863,7 +785,6 @@ vint64m8_t testuxei64_v_i64m8(const int64_t *base, vuint64m8_t bindex, size_t vl
   return vloxei64_v_i64m8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -874,7 +795,6 @@ vuint8mf8_t testuxei8_v_u8mf8(const uint8_t *base, vuint8mf8_t bindex, size_t vl
   return vloxei8_v_u8mf8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -885,7 +805,6 @@ vuint8mf4_t testuxei8_v_u8mf4(const uint8_t *base, vuint8mf4_t bindex, size_t vl
   return vloxei8_v_u8mf4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -896,7 +815,6 @@ vuint8mf2_t testuxei8_v_u8mf2(const uint8_t *base, vuint8mf2_t bindex, size_t vl
   return vloxei8_v_u8mf2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -907,7 +825,6 @@ vuint8m1_t testuxei8_v_u8m1(const uint8_t *base, vuint8m1_t bindex, size_t vl) {
   return vloxei8_v_u8m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -918,7 +835,6 @@ vuint8m2_t testuxei8_v_u8m2(const uint8_t *base, vuint8m2_t bindex, size_t vl) {
   return vloxei8_v_u8m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
@@ -929,7 +845,6 @@ vuint8m4_t testuxei8_v_u8m4(const uint8_t *base, vuint8m4_t bindex, size_t vl) {
   return vloxei8_v_u8m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
@@ -940,7 +855,6 @@ vuint8m8_t testuxei8_v_u8m8(const uint8_t *base, vuint8m8_t bindex, size_t vl) {
   return vloxei8_v_u8m8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -951,7 +865,6 @@ vuint8mf8_t testuxei16_v_u8mf8(const uint8_t *base, vuint16mf4_t bindex, size_t
   return vloxei16_v_u8mf8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -962,7 +875,6 @@ vuint8mf4_t testuxei16_v_u8mf4(const uint8_t *base, vuint16mf2_t bindex, size_t
   return vloxei16_v_u8mf4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -973,7 +885,6 @@ vuint8mf2_t testuxei16_v_u8mf2(const uint8_t *base, vuint16m1_t bindex, size_t v
   return vloxei16_v_u8mf2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -984,7 +895,6 @@ vuint8m1_t testuxei16_v_u8m1(const uint8_t *base, vuint16m2_t bindex, size_t vl)
   return vloxei16_v_u8m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -995,7 +905,6 @@ vuint8m2_t testuxei16_v_u8m2(const uint8_t *base, vuint16m4_t bindex, size_t vl)
   return vloxei16_v_u8m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
@@ -1006,7 +915,6 @@ vuint8m4_t testuxei16_v_u8m4(const uint8_t *base, vuint16m8_t bindex, size_t vl)
   return vloxei16_v_u8m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -1017,7 +925,6 @@ vuint8mf8_t testuxei32_v_u8mf8(const uint8_t *base, vuint32mf2_t bindex, size_t
   return vloxei32_v_u8mf8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -1028,7 +935,6 @@ vuint8mf4_t testuxei32_v_u8mf4(const uint8_t *base, vuint32m1_t bindex, size_t v
   return vloxei32_v_u8mf4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -1039,7 +945,6 @@ vuint8mf2_t testuxei32_v_u8mf2(const uint8_t *base, vuint32m2_t bindex, size_t v
   return vloxei32_v_u8mf2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -1050,7 +955,6 @@ vuint8m1_t testuxei32_v_u8m1(const uint8_t *base, vuint32m4_t bindex, size_t vl)
   return vloxei32_v_u8m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -1061,7 +965,6 @@ vuint8m2_t testuxei32_v_u8m2(const uint8_t *base, vuint32m8_t bindex, size_t vl)
   return vloxei32_v_u8m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -1072,7 +975,6 @@ vuint8mf8_t testuxei64_v_u8mf8(const uint8_t *base, vuint64m1_t bindex, size_t v
   return vloxei64_v_u8mf8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -1083,7 +985,6 @@ vuint8mf4_t testuxei64_v_u8mf4(const uint8_t *base, vuint64m2_t bindex, size_t v
   return vloxei64_v_u8mf4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -1094,7 +995,6 @@ vuint8mf2_t testuxei64_v_u8mf2(const uint8_t *base, vuint64m4_t bindex, size_t v
   return vloxei64_v_u8mf2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -1105,7 +1005,6 @@ vuint8m1_t testuxei64_v_u8m1(const uint8_t *base, vuint64m8_t bindex, size_t vl)
   return vloxei64_v_u8m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -1116,7 +1015,6 @@ vuint16mf4_t testuxei8_v_u16mf4(const uint16_t *base, vuint8mf8_t bindex, size_t
   return vloxei8_v_u16mf4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -1127,7 +1025,6 @@ vuint16mf2_t testuxei8_v_u16mf2(const uint16_t *base, vuint8mf4_t bindex, size_t
   return vloxei8_v_u16mf2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -1138,7 +1035,6 @@ vuint16m1_t testuxei8_v_u16m1(const uint16_t *base, vuint8mf2_t bindex, size_t v
   return vloxei8_v_u16m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -1149,7 +1045,6 @@ vuint16m2_t testuxei8_v_u16m2(const uint16_t *base, vuint8m1_t bindex, size_t vl
   return vloxei8_v_u16m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -1160,7 +1055,6 @@ vuint16m4_t testuxei8_v_u16m4(const uint16_t *base, vuint8m2_t bindex, size_t vl
   return vloxei8_v_u16m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
@@ -1171,7 +1065,6 @@ vuint16m8_t testuxei8_v_u16m8(const uint16_t *base, vuint8m4_t bindex, size_t vl
   return vloxei8_v_u16m8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -1182,7 +1075,6 @@ vuint16mf4_t testuxei16_v_u16mf4(const uint16_t *base, vuint16mf4_t bindex, size
   return vloxei16_v_u16mf4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -1193,7 +1085,6 @@ vuint16mf2_t testuxei16_v_u16mf2(const uint16_t *base, vuint16mf2_t bindex, size
   return vloxei16_v_u16mf2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -1204,7 +1095,6 @@ vuint16m1_t testuxei16_v_u16m1(const uint16_t *base, vuint16m1_t bindex, size_t
   return vloxei16_v_u16m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -1215,7 +1105,6 @@ vuint16m2_t testuxei16_v_u16m2(const uint16_t *base, vuint16m2_t bindex, size_t
   return vloxei16_v_u16m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -1226,7 +1115,6 @@ vuint16m4_t testuxei16_v_u16m4(const uint16_t *base, vuint16m4_t bindex, size_t
   return vloxei16_v_u16m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
@@ -1237,7 +1125,6 @@ vuint16m8_t testuxei16_v_u16m8(const uint16_t *base, vuint16m8_t bindex, size_t
   return vloxei16_v_u16m8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -1248,7 +1135,6 @@ vuint16mf4_t testuxei32_v_u16mf4(const uint16_t *base, vuint32mf2_t bindex, size
   return vloxei32_v_u16mf4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -1259,7 +1145,6 @@ vuint16mf2_t testuxei32_v_u16mf2(const uint16_t *base, vuint32m1_t bindex, size_
   return vloxei32_v_u16mf2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -1270,7 +1155,6 @@ vuint16m1_t testuxei32_v_u16m1(const uint16_t *base, vuint32m2_t bindex, size_t
   return vloxei32_v_u16m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -1281,7 +1165,6 @@ vuint16m2_t testuxei32_v_u16m2(const uint16_t *base, vuint32m4_t bindex, size_t
   return vloxei32_v_u16m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -1292,7 +1175,6 @@ vuint16m4_t testuxei32_v_u16m4(const uint16_t *base, vuint32m8_t bindex, size_t
   return vloxei32_v_u16m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -1303,7 +1185,6 @@ vuint16mf4_t testuxei64_v_u16mf4(const uint16_t *base, vuint64m1_t bindex, size_
   return vloxei64_v_u16mf4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -1314,7 +1195,6 @@ vuint16mf2_t testuxei64_v_u16mf2(const uint16_t *base, vuint64m2_t bindex, size_
   return vloxei64_v_u16mf2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -1325,7 +1205,6 @@ vuint16m1_t testuxei64_v_u16m1(const uint16_t *base, vuint64m4_t bindex, size_t
   return vloxei64_v_u16m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -1336,7 +1215,6 @@ vuint16m2_t testuxei64_v_u16m2(const uint16_t *base, vuint64m8_t bindex, size_t
   return vloxei64_v_u16m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1347,7 +1225,6 @@ vuint32mf2_t testuxei8_v_u32mf2(const uint32_t *base, vuint8mf8_t bindex, size_t
   return vloxei8_v_u32mf2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1358,7 +1235,6 @@ vuint32m1_t testuxei8_v_u32m1(const uint32_t *base, vuint8mf4_t bindex, size_t v
   return vloxei8_v_u32m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1369,7 +1245,6 @@ vuint32m2_t testuxei8_v_u32m2(const uint32_t *base, vuint8mf2_t bindex, size_t v
   return vloxei8_v_u32m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1380,7 +1255,6 @@ vuint32m4_t testuxei8_v_u32m4(const uint32_t *base, vuint8m1_t bindex, size_t vl
   return vloxei8_v_u32m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1391,7 +1265,6 @@ vuint32m8_t testuxei8_v_u32m8(const uint32_t *base, vuint8m2_t bindex, size_t vl
   return vloxei8_v_u32m8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1402,7 +1275,6 @@ vuint32mf2_t testuxei16_v_u32mf2(const uint32_t *base, vuint16mf4_t bindex, size
   return vloxei16_v_u32mf2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1413,7 +1285,6 @@ vuint32m1_t testuxei16_v_u32m1(const uint32_t *base, vuint16mf2_t bindex, size_t
   return vloxei16_v_u32m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1424,7 +1295,6 @@ vuint32m2_t testuxei16_v_u32m2(const uint32_t *base, vuint16m1_t bindex, size_t
   return vloxei16_v_u32m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1435,7 +1305,6 @@ vuint32m4_t testuxei16_v_u32m4(const uint32_t *base, vuint16m2_t bindex, size_t
   return vloxei16_v_u32m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1446,7 +1315,6 @@ vuint32m8_t testuxei16_v_u32m8(const uint32_t *base, vuint16m4_t bindex, size_t
   return vloxei16_v_u32m8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1457,7 +1325,6 @@ vuint32mf2_t testuxei32_v_u32mf2(const uint32_t *base, vuint32mf2_t bindex, size
   return vloxei32_v_u32mf2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1468,7 +1335,6 @@ vuint32m1_t testuxei32_v_u32m1(const uint32_t *base, vuint32m1_t bindex, size_t
   return vloxei32_v_u32m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1479,7 +1345,6 @@ vuint32m2_t testuxei32_v_u32m2(const uint32_t *base, vuint32m2_t bindex, size_t
   return vloxei32_v_u32m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1490,7 +1355,6 @@ vuint32m4_t testuxei32_v_u32m4(const uint32_t *base, vuint32m4_t bindex, size_t
   return vloxei32_v_u32m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1501,7 +1365,6 @@ vuint32m8_t testuxei32_v_u32m8(const uint32_t *base, vuint32m8_t bindex, size_t
   return vloxei32_v_u32m8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1512,7 +1375,6 @@ vuint32mf2_t testuxei64_v_u32mf2(const uint32_t *base, vuint64m1_t bindex, size_
   return vloxei64_v_u32mf2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1523,7 +1385,6 @@ vuint32m1_t testuxei64_v_u32m1(const uint32_t *base, vuint64m2_t bindex, size_t
   return vloxei64_v_u32m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1534,7 +1395,6 @@ vuint32m2_t testuxei64_v_u32m2(const uint32_t *base, vuint64m4_t bindex, size_t
   return vloxei64_v_u32m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1545,7 +1405,6 @@ vuint32m4_t testuxei64_v_u32m4(const uint32_t *base, vuint64m8_t bindex, size_t
   return vloxei64_v_u32m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1556,7 +1415,6 @@ vuint64m1_t testuxei8_v_u64m1(const uint64_t *base, vuint8mf8_t bindex, size_t v
   return vloxei8_v_u64m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1567,7 +1425,6 @@ vuint64m2_t testuxei8_v_u64m2(const uint64_t *base, vuint8mf4_t bindex, size_t v
   return vloxei8_v_u64m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1578,7 +1435,6 @@ vuint64m4_t testuxei8_v_u64m4(const uint64_t *base, vuint8mf2_t bindex, size_t v
   return vloxei8_v_u64m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1589,7 +1445,6 @@ vuint64m8_t testuxei8_v_u64m8(const uint64_t *base, vuint8m1_t bindex, size_t vl
   return vloxei8_v_u64m8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1600,7 +1455,6 @@ vuint64m1_t testuxei16_v_u64m1(const uint64_t *base, vuint16mf4_t bindex, size_t
   return vloxei16_v_u64m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1611,7 +1465,6 @@ vuint64m2_t testuxei16_v_u64m2(const uint64_t *base, vuint16mf2_t bindex, size_t
   return vloxei16_v_u64m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1622,7 +1475,6 @@ vuint64m4_t testuxei16_v_u64m4(const uint64_t *base, vuint16m1_t bindex, size_t
   return vloxei16_v_u64m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1633,7 +1485,6 @@ vuint64m8_t testuxei16_v_u64m8(const uint64_t *base, vuint16m2_t bindex, size_t
   return vloxei16_v_u64m8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1644,7 +1495,6 @@ vuint64m1_t testuxei32_v_u64m1(const uint64_t *base, vuint32mf2_t bindex, size_t
   return vloxei32_v_u64m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1655,7 +1505,6 @@ vuint64m2_t testuxei32_v_u64m2(const uint64_t *base, vuint32m1_t bindex, size_t
   return vloxei32_v_u64m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1666,7 +1515,6 @@ vuint64m4_t testuxei32_v_u64m4(const uint64_t *base, vuint32m2_t bindex, size_t
   return vloxei32_v_u64m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1677,7 +1525,6 @@ vuint64m8_t testuxei32_v_u64m8(const uint64_t *base, vuint32m4_t bindex, size_t
   return vloxei32_v_u64m8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1688,7 +1535,6 @@ vuint64m1_t testuxei64_v_u64m1(const uint64_t *base, vuint64m1_t bindex, size_t
   return vloxei64_v_u64m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1699,7 +1545,6 @@ vuint64m2_t testuxei64_v_u64m2(const uint64_t *base, vuint64m2_t bindex, size_t
   return vloxei64_v_u64m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1710,7 +1555,6 @@ vuint64m4_t testuxei64_v_u64m4(const uint64_t *base, vuint64m4_t bindex, size_t
   return vloxei64_v_u64m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1721,7 +1565,6 @@ vuint64m8_t testuxei64_v_u64m8(const uint64_t *base, vuint64m8_t bindex, size_t
   return vloxei64_v_u64m8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -1732,7 +1575,6 @@ vfloat32mf2_t testuxei8_v_f32mf2(const float *base, vuint8mf8_t bindex, size_t v
   return vloxei8_v_f32mf2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -1743,7 +1585,6 @@ vfloat32m1_t testuxei8_v_f32m1(const float *base, vuint8mf4_t bindex, size_t vl)
   return vloxei8_v_f32m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -1754,7 +1595,6 @@ vfloat32m2_t testuxei8_v_f32m2(const float *base, vuint8mf2_t bindex, size_t vl)
   return vloxei8_v_f32m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -1765,7 +1605,6 @@ vfloat32m4_t testuxei8_v_f32m4(const float *base, vuint8m1_t bindex, size_t vl)
   return vloxei8_v_f32m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
@@ -1776,7 +1615,6 @@ vfloat32m8_t testuxei8_v_f32m8(const float *base, vuint8m2_t bindex, size_t vl)
   return vloxei8_v_f32m8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -1787,7 +1625,6 @@ vfloat32mf2_t testuxei16_v_f32mf2(const float *base, vuint16mf4_t bindex, size_t
   return vloxei16_v_f32mf2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -1798,7 +1635,6 @@ vfloat32m1_t testuxei16_v_f32m1(const float *base, vuint16mf2_t bindex, size_t v
   return vloxei16_v_f32m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -1809,7 +1645,6 @@ vfloat32m2_t testuxei16_v_f32m2(const float *base, vuint16m1_t bindex, size_t vl
   return vloxei16_v_f32m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -1820,7 +1655,6 @@ vfloat32m4_t testuxei16_v_f32m4(const float *base, vuint16m2_t bindex, size_t vl
   return vloxei16_v_f32m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
@@ -1831,7 +1665,6 @@ vfloat32m8_t testuxei16_v_f32m8(const float *base, vuint16m4_t bindex, size_t vl
   return vloxei16_v_f32m8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -1842,7 +1675,6 @@ vfloat32mf2_t testuxei32_v_f32mf2(const float *base, vuint32mf2_t bindex, size_t
   return vloxei32_v_f32mf2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -1853,7 +1685,6 @@ vfloat32m1_t testuxei32_v_f32m1(const float *base, vuint32m1_t bindex, size_t vl
   return vloxei32_v_f32m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -1864,7 +1695,6 @@ vfloat32m2_t testuxei32_v_f32m2(const float *base, vuint32m2_t bindex, size_t vl
   return vloxei32_v_f32m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -1875,7 +1705,6 @@ vfloat32m4_t testuxei32_v_f32m4(const float *base, vuint32m4_t bindex, size_t vl
   return vloxei32_v_f32m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
@@ -1886,7 +1715,6 @@ vfloat32m8_t testuxei32_v_f32m8(const float *base, vuint32m8_t bindex, size_t vl
   return vloxei32_v_f32m8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -1897,7 +1725,6 @@ vfloat32mf2_t testuxei64_v_f32mf2(const float *base, vuint64m1_t bindex, size_t
   return vloxei64_v_f32mf2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -1908,7 +1735,6 @@ vfloat32m1_t testuxei64_v_f32m1(const float *base, vuint64m2_t bindex, size_t vl
   return vloxei64_v_f32m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -1919,7 +1745,6 @@ vfloat32m2_t testuxei64_v_f32m2(const float *base, vuint64m4_t bindex, size_t vl
   return vloxei64_v_f32m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -1930,7 +1755,6 @@ vfloat32m4_t testuxei64_v_f32m4(const float *base, vuint64m8_t bindex, size_t vl
   return vloxei64_v_f32m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -1941,7 +1765,6 @@ vfloat64m1_t testuxei8_v_f64m1(const double *base, vuint8mf8_t bindex, size_t vl
   return vloxei8_v_f64m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -1952,7 +1775,6 @@ vfloat64m2_t testuxei8_v_f64m2(const double *base, vuint8mf4_t bindex, size_t vl
   return vloxei8_v_f64m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -1963,7 +1785,6 @@ vfloat64m4_t testuxei8_v_f64m4(const double *base, vuint8mf2_t bindex, size_t vl
   return vloxei8_v_f64m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
@@ -1974,7 +1795,6 @@ vfloat64m8_t testuxei8_v_f64m8(const double *base, vuint8m1_t bindex, size_t vl)
   return vloxei8_v_f64m8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -1985,7 +1805,6 @@ vfloat64m1_t testuxei16_v_f64m1(const double *base, vuint16mf4_t bindex, size_t
   return vloxei16_v_f64m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -1996,7 +1815,6 @@ vfloat64m2_t testuxei16_v_f64m2(const double *base, vuint16mf2_t bindex, size_t
   return vloxei16_v_f64m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -2007,7 +1825,6 @@ vfloat64m4_t testuxei16_v_f64m4(const double *base, vuint16m1_t bindex, size_t v
   return vloxei16_v_f64m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
@@ -2018,7 +1835,6 @@ vfloat64m8_t testuxei16_v_f64m8(const double *base, vuint16m2_t bindex, size_t v
   return vloxei16_v_f64m8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -2029,7 +1845,6 @@ vfloat64m1_t testuxei32_v_f64m1(const double *base, vuint32mf2_t bindex, size_t
   return vloxei32_v_f64m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -2040,7 +1855,6 @@ vfloat64m2_t testuxei32_v_f64m2(const double *base, vuint32m1_t bindex, size_t v
   return vloxei32_v_f64m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -2051,7 +1865,6 @@ vfloat64m4_t testuxei32_v_f64m4(const double *base, vuint32m2_t bindex, size_t v
   return vloxei32_v_f64m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
@@ -2062,7 +1875,6 @@ vfloat64m8_t testuxei32_v_f64m8(const double *base, vuint32m4_t bindex, size_t v
   return vloxei32_v_f64m8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -2073,7 +1885,6 @@ vfloat64m1_t testuxei64_v_f64m1(const double *base, vuint64m1_t bindex, size_t v
   return vloxei64_v_f64m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -2084,7 +1895,6 @@ vfloat64m2_t testuxei64_v_f64m2(const double *base, vuint64m2_t bindex, size_t v
   return vloxei64_v_f64m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -2095,7 +1905,6 @@ vfloat64m4_t testuxei64_v_f64m4(const double *base, vuint64m4_t bindex, size_t v
   return vloxei64_v_f64m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
@@ -2106,7 +1915,6 @@ vfloat64m8_t testuxei64_v_f64m8(const double *base, vuint64m8_t bindex, size_t v
   return vloxei64_v_f64m8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -2117,7 +1925,6 @@ vint8mf8_t testuxei8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_
   return vloxei8_v_i8mf8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -2128,7 +1935,6 @@ vint8mf4_t testuxei8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_
   return vloxei8_v_i8mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -2139,7 +1945,6 @@ vint8mf2_t testuxei8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_
   return vloxei8_v_i8mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -2150,7 +1955,6 @@ vint8m1_t testuxei8_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *b
   return vloxei8_v_i8m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -2161,7 +1965,6 @@ vint8m2_t testuxei8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *b
   return vloxei8_v_i8m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
@@ -2172,7 +1975,6 @@ vint8m4_t testuxei8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *b
   return vloxei8_v_i8m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
@@ -2183,7 +1985,6 @@ vint8m8_t testuxei8_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, const int8_t *b
   return vloxei8_v_i8m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -2194,7 +1995,6 @@ vint8mf8_t testuxei16_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8
   return vloxei16_v_i8mf8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -2205,7 +2005,6 @@ vint8mf4_t testuxei16_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8
   return vloxei16_v_i8mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -2216,7 +2015,6 @@ vint8mf2_t testuxei16_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8
   return vloxei16_v_i8mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -2227,7 +2025,6 @@ vint8m1_t testuxei16_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *
   return vloxei16_v_i8m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -2238,7 +2035,6 @@ vint8m2_t testuxei16_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *
   return vloxei16_v_i8m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
@@ -2249,7 +2045,6 @@ vint8m4_t testuxei16_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *
   return vloxei16_v_i8m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -2260,7 +2055,6 @@ vint8mf8_t testuxei32_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8
   return vloxei32_v_i8mf8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -2271,7 +2065,6 @@ vint8mf4_t testuxei32_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8
   return vloxei32_v_i8mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -2282,7 +2075,6 @@ vint8mf2_t testuxei32_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8
   return vloxei32_v_i8mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -2293,7 +2085,6 @@ vint8m1_t testuxei32_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *
   return vloxei32_v_i8m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -2304,7 +2095,6 @@ vint8m2_t testuxei32_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *
   return vloxei32_v_i8m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -2315,7 +2105,6 @@ vint8mf8_t testuxei64_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8
   return vloxei64_v_i8mf8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -2326,7 +2115,6 @@ vint8mf4_t testuxei64_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8
   return vloxei64_v_i8mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -2337,7 +2125,6 @@ vint8mf2_t testuxei64_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8
   return vloxei64_v_i8mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -2348,7 +2135,6 @@ vint8m1_t testuxei64_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *
   return vloxei64_v_i8m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -2359,7 +2145,6 @@ vint16mf4_t testuxei8_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const in
   return vloxei8_v_i16mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -2370,7 +2155,6 @@ vint16mf2_t testuxei8_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const in
   return vloxei8_v_i16mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -2381,7 +2165,6 @@ vint16m1_t testuxei8_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16
   return vloxei8_v_i16m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -2392,7 +2175,6 @@ vint16m2_t testuxei8_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_
   return vloxei8_v_i16m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -2403,7 +2185,6 @@ vint16m4_t testuxei8_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_
   return vloxei8_v_i16m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
@@ -2414,7 +2195,6 @@ vint16m8_t testuxei8_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_
   return vloxei8_v_i16m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -2425,7 +2205,6 @@ vint16mf4_t testuxei16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const i
   return vloxei16_v_i16mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -2436,7 +2215,6 @@ vint16mf2_t testuxei16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const i
   return vloxei16_v_i16mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -2447,7 +2225,6 @@ vint16m1_t testuxei16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int1
   return vloxei16_v_i16m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -2458,7 +2235,6 @@ vint16m2_t testuxei16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16
   return vloxei16_v_i16m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -2469,7 +2245,6 @@ vint16m4_t testuxei16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16
   return vloxei16_v_i16m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
@@ -2480,7 +2255,6 @@ vint16m8_t testuxei16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16
   return vloxei16_v_i16m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -2491,7 +2265,6 @@ vint16mf4_t testuxei32_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const i
   return vloxei32_v_i16mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -2502,7 +2275,6 @@ vint16mf2_t testuxei32_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const i
   return vloxei32_v_i16mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -2513,7 +2285,6 @@ vint16m1_t testuxei32_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int1
   return vloxei32_v_i16m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -2524,7 +2295,6 @@ vint16m2_t testuxei32_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16
   return vloxei32_v_i16m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -2535,7 +2305,6 @@ vint16m4_t testuxei32_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16
   return vloxei32_v_i16m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -2546,7 +2315,6 @@ vint16mf4_t testuxei64_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const i
   return vloxei64_v_i16mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -2557,7 +2325,6 @@ vint16mf2_t testuxei64_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const i
   return vloxei64_v_i16mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -2568,7 +2335,6 @@ vint16m1_t testuxei64_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int1
   return vloxei64_v_i16m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -2579,7 +2345,6 @@ vint16m2_t testuxei64_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16
   return vloxei64_v_i16m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -2590,7 +2355,6 @@ vint32mf2_t testuxei8_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const in
   return vloxei8_v_i32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -2601,7 +2365,6 @@ vint32m1_t testuxei8_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32
   return vloxei8_v_i32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -2612,7 +2375,6 @@ vint32m2_t testuxei8_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32
   return vloxei8_v_i32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -2623,7 +2385,6 @@ vint32m4_t testuxei8_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_
   return vloxei8_v_i32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -2634,7 +2395,6 @@ vint32m8_t testuxei8_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_
   return vloxei8_v_i32m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -2645,7 +2405,6 @@ vint32mf2_t testuxei16_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const i
   return vloxei16_v_i32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -2656,7 +2415,6 @@ vint32m1_t testuxei16_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int3
   return vloxei16_v_i32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -2667,7 +2425,6 @@ vint32m2_t testuxei16_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int3
   return vloxei16_v_i32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -2678,7 +2435,6 @@ vint32m4_t testuxei16_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32
   return vloxei16_v_i32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -2689,7 +2445,6 @@ vint32m8_t testuxei16_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32
   return vloxei16_v_i32m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -2700,7 +2455,6 @@ vint32mf2_t testuxei32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const i
   return vloxei32_v_i32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -2711,7 +2465,6 @@ vint32m1_t testuxei32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int3
   return vloxei32_v_i32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -2722,7 +2475,6 @@ vint32m2_t testuxei32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int3
   return vloxei32_v_i32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -2733,7 +2485,6 @@ vint32m4_t testuxei32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32
   return vloxei32_v_i32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -2744,7 +2495,6 @@ vint32m8_t testuxei32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32
   return vloxei32_v_i32m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -2755,7 +2505,6 @@ vint32mf2_t testuxei64_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const i
   return vloxei64_v_i32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -2766,7 +2515,6 @@ vint32m1_t testuxei64_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int3
   return vloxei64_v_i32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -2777,7 +2525,6 @@ vint32m2_t testuxei64_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int3
   return vloxei64_v_i32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -2788,7 +2535,6 @@ vint32m4_t testuxei64_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32
   return vloxei64_v_i32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -2799,7 +2545,6 @@ vint64m1_t testuxei8_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64
   return vloxei8_v_i64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -2810,7 +2555,6 @@ vint64m2_t testuxei8_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64
   return vloxei8_v_i64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -2821,7 +2565,6 @@ vint64m4_t testuxei8_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64
   return vloxei8_v_i64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -2832,7 +2575,6 @@ vint64m8_t testuxei8_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_
   return vloxei8_v_i64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -2843,7 +2585,6 @@ vint64m1_t testuxei16_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int6
   return vloxei16_v_i64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -2854,7 +2595,6 @@ vint64m2_t testuxei16_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int6
   return vloxei16_v_i64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -2865,7 +2605,6 @@ vint64m4_t testuxei16_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int6
   return vloxei16_v_i64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -2876,7 +2615,6 @@ vint64m8_t testuxei16_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64
   return vloxei16_v_i64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -2887,7 +2625,6 @@ vint64m1_t testuxei32_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int6
   return vloxei32_v_i64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -2898,7 +2635,6 @@ vint64m2_t testuxei32_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int6
   return vloxei32_v_i64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -2909,7 +2645,6 @@ vint64m4_t testuxei32_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int6
   return vloxei32_v_i64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -2920,7 +2655,6 @@ vint64m8_t testuxei32_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64
   return vloxei32_v_i64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -2931,7 +2665,6 @@ vint64m1_t testuxei64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int6
   return vloxei64_v_i64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -2942,7 +2675,6 @@ vint64m2_t testuxei64_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int6
   return vloxei64_v_i64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -2953,7 +2685,6 @@ vint64m4_t testuxei64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int6
   return vloxei64_v_i64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -2964,7 +2695,6 @@ vint64m8_t testuxei64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64
   return vloxei64_v_i64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -2975,7 +2705,6 @@ vuint8mf8_t testuxei8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uin
   return vloxei8_v_u8mf8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -2986,7 +2715,6 @@ vuint8mf4_t testuxei8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uin
   return vloxei8_v_u8mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -2997,7 +2725,6 @@ vuint8mf2_t testuxei8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uin
   return vloxei8_v_u8mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -3008,7 +2735,6 @@ vuint8m1_t testuxei8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t
   return vloxei8_v_u8m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -3019,7 +2745,6 @@ vuint8m2_t testuxei8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t
   return vloxei8_v_u8m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
@@ -3030,7 +2755,6 @@ vuint8m4_t testuxei8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t
   return vloxei8_v_u8m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
@@ -3041,7 +2765,6 @@ vuint8m8_t testuxei8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t
   return vloxei8_v_u8m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -3052,7 +2775,6 @@ vuint8mf8_t testuxei16_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const ui
   return vloxei16_v_u8mf8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -3063,7 +2785,6 @@ vuint8mf4_t testuxei16_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const ui
   return vloxei16_v_u8mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -3074,7 +2795,6 @@ vuint8mf2_t testuxei16_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const ui
   return vloxei16_v_u8mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -3085,7 +2805,6 @@ vuint8m1_t testuxei16_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_
   return vloxei16_v_u8m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -3096,7 +2815,6 @@ vuint8m2_t testuxei16_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_
   return vloxei16_v_u8m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
@@ -3107,7 +2825,6 @@ vuint8m4_t testuxei16_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_
   return vloxei16_v_u8m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -3118,7 +2835,6 @@ vuint8mf8_t testuxei32_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const ui
   return vloxei32_v_u8mf8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -3129,7 +2845,6 @@ vuint8mf4_t testuxei32_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const ui
   return vloxei32_v_u8mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -3140,7 +2855,6 @@ vuint8mf2_t testuxei32_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const ui
   return vloxei32_v_u8mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -3151,7 +2865,6 @@ vuint8m1_t testuxei32_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_
   return vloxei32_v_u8m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -3162,7 +2875,6 @@ vuint8m2_t testuxei32_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_
   return vloxei32_v_u8m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -3173,7 +2885,6 @@ vuint8mf8_t testuxei64_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const ui
   return vloxei64_v_u8mf8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -3184,7 +2895,6 @@ vuint8mf4_t testuxei64_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const ui
   return vloxei64_v_u8mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -3195,7 +2905,6 @@ vuint8mf2_t testuxei64_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const ui
   return vloxei64_v_u8mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -3206,7 +2915,6 @@ vuint8m1_t testuxei64_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_
   return vloxei64_v_u8m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -3217,7 +2925,6 @@ vuint16mf4_t testuxei8_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const
   return vloxei8_v_u16mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -3228,7 +2935,6 @@ vuint16mf2_t testuxei8_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const
   return vloxei8_v_u16mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -3239,7 +2945,6 @@ vuint16m1_t testuxei8_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uin
   return vloxei8_v_u16m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -3250,7 +2955,6 @@ vuint16m2_t testuxei8_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint
   return vloxei8_v_u16m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -3261,7 +2965,6 @@ vuint16m4_t testuxei8_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint
   return vloxei8_v_u16m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
@@ -3272,7 +2975,6 @@ vuint16m8_t testuxei8_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint
   return vloxei8_v_u16m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -3283,7 +2985,6 @@ vuint16mf4_t testuxei16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const
   return vloxei16_v_u16mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -3294,7 +2995,6 @@ vuint16mf2_t testuxei16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const
   return vloxei16_v_u16mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -3305,7 +3005,6 @@ vuint16m1_t testuxei16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const ui
   return vloxei16_v_u16m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -3316,7 +3015,6 @@ vuint16m2_t testuxei16_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uin
   return vloxei16_v_u16m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -3327,7 +3025,6 @@ vuint16m4_t testuxei16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uin
   return vloxei16_v_u16m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
@@ -3338,7 +3035,6 @@ vuint16m8_t testuxei16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uin
   return vloxei16_v_u16m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -3349,7 +3045,6 @@ vuint16mf4_t testuxei32_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const
   return vloxei32_v_u16mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -3360,7 +3055,6 @@ vuint16mf2_t testuxei32_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const
   return vloxei32_v_u16mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -3371,7 +3065,6 @@ vuint16m1_t testuxei32_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const ui
   return vloxei32_v_u16m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -3382,7 +3075,6 @@ vuint16m2_t testuxei32_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uin
   return vloxei32_v_u16m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -3393,7 +3085,6 @@ vuint16m4_t testuxei32_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uin
   return vloxei32_v_u16m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -3404,7 +3095,6 @@ vuint16mf4_t testuxei64_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const
   return vloxei64_v_u16mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -3415,7 +3105,6 @@ vuint16mf2_t testuxei64_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const
   return vloxei64_v_u16mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -3426,7 +3115,6 @@ vuint16m1_t testuxei64_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const ui
   return vloxei64_v_u16m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -3437,7 +3125,6 @@ vuint16m2_t testuxei64_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uin
   return vloxei64_v_u16m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -3448,7 +3135,6 @@ vuint32mf2_t testuxei8_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const
   return vloxei8_v_u32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -3459,7 +3145,6 @@ vuint32m1_t testuxei8_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uin
   return vloxei8_v_u32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -3470,7 +3155,6 @@ vuint32m2_t testuxei8_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uin
   return vloxei8_v_u32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -3481,7 +3165,6 @@ vuint32m4_t testuxei8_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint
   return vloxei8_v_u32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -3492,7 +3175,6 @@ vuint32m8_t testuxei8_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint
   return vloxei8_v_u32m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -3503,7 +3185,6 @@ vuint32mf2_t testuxei16_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const
   return vloxei16_v_u32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -3514,7 +3195,6 @@ vuint32m1_t testuxei16_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const ui
   return vloxei16_v_u32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -3525,7 +3205,6 @@ vuint32m2_t testuxei16_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const ui
   return vloxei16_v_u32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -3536,7 +3215,6 @@ vuint32m4_t testuxei16_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uin
   return vloxei16_v_u32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -3547,7 +3225,6 @@ vuint32m8_t testuxei16_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uin
   return vloxei16_v_u32m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -3558,7 +3235,6 @@ vuint32mf2_t testuxei32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const
   return vloxei32_v_u32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -3569,7 +3245,6 @@ vuint32m1_t testuxei32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const ui
   return vloxei32_v_u32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -3580,7 +3255,6 @@ vuint32m2_t testuxei32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const ui
   return vloxei32_v_u32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -3591,7 +3265,6 @@ vuint32m4_t testuxei32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uin
   return vloxei32_v_u32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -3602,7 +3275,6 @@ vuint32m8_t testuxei32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uin
   return vloxei32_v_u32m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -3613,7 +3285,6 @@ vuint32mf2_t testuxei64_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const
   return vloxei64_v_u32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -3624,7 +3295,6 @@ vuint32m1_t testuxei64_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const ui
   return vloxei64_v_u32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -3635,7 +3305,6 @@ vuint32m2_t testuxei64_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const ui
   return vloxei64_v_u32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -3646,7 +3315,6 @@ vuint32m4_t testuxei64_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uin
   return vloxei64_v_u32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -3657,7 +3325,6 @@ vuint64m1_t testuxei8_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uin
   return vloxei8_v_u64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -3668,7 +3335,6 @@ vuint64m2_t testuxei8_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uin
   return vloxei8_v_u64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -3679,7 +3345,6 @@ vuint64m4_t testuxei8_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uin
   return vloxei8_v_u64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -3690,7 +3355,6 @@ vuint64m8_t testuxei8_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint
   return vloxei8_v_u64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -3701,7 +3365,6 @@ vuint64m1_t testuxei16_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const ui
   return vloxei16_v_u64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -3712,7 +3375,6 @@ vuint64m2_t testuxei16_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const ui
   return vloxei16_v_u64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -3723,7 +3385,6 @@ vuint64m4_t testuxei16_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const ui
   return vloxei16_v_u64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -3734,7 +3395,6 @@ vuint64m8_t testuxei16_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uin
   return vloxei16_v_u64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -3745,7 +3405,6 @@ vuint64m1_t testuxei32_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const ui
   return vloxei32_v_u64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -3756,7 +3415,6 @@ vuint64m2_t testuxei32_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const ui
   return vloxei32_v_u64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -3767,7 +3425,6 @@ vuint64m4_t testuxei32_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const ui
   return vloxei32_v_u64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -3778,7 +3435,6 @@ vuint64m8_t testuxei32_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uin
   return vloxei32_v_u64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -3789,7 +3445,6 @@ vuint64m1_t testuxei64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const ui
   return vloxei64_v_u64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -3800,7 +3455,6 @@ vuint64m2_t testuxei64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const ui
   return vloxei64_v_u64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -3811,7 +3465,6 @@ vuint64m4_t testuxei64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const ui
   return vloxei64_v_u64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -3822,7 +3475,6 @@ vuint64m8_t testuxei64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uin
   return vloxei64_v_u64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -3833,7 +3485,6 @@ vfloat32mf2_t testuxei8_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, cons
   return vloxei8_v_f32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -3844,7 +3495,6 @@ vfloat32m1_t testuxei8_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const f
   return vloxei8_v_f32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -3855,7 +3505,6 @@ vfloat32m2_t testuxei8_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const f
   return vloxei8_v_f32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -3866,7 +3515,6 @@ vfloat32m4_t testuxei8_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const fl
   return vloxei8_v_f32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
@@ -3877,7 +3525,6 @@ vfloat32m8_t testuxei8_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const fl
   return vloxei8_v_f32m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -3888,7 +3535,6 @@ vfloat32mf2_t testuxei16_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, con
   return vloxei16_v_f32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -3899,7 +3545,6 @@ vfloat32m1_t testuxei16_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const
   return vloxei16_v_f32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -3910,7 +3555,6 @@ vfloat32m2_t testuxei16_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const
   return vloxei16_v_f32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -3921,7 +3565,6 @@ vfloat32m4_t testuxei16_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const f
   return vloxei16_v_f32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
@@ -3932,7 +3575,6 @@ vfloat32m8_t testuxei16_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const f
   return vloxei16_v_f32m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -3943,7 +3585,6 @@ vfloat32mf2_t testuxei32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, con
   return vloxei32_v_f32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -3954,7 +3595,6 @@ vfloat32m1_t testuxei32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const
   return vloxei32_v_f32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -3965,7 +3605,6 @@ vfloat32m2_t testuxei32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const
   return vloxei32_v_f32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -3976,7 +3615,6 @@ vfloat32m4_t testuxei32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const f
   return vloxei32_v_f32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
@@ -3987,7 +3625,6 @@ vfloat32m8_t testuxei32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const f
   return vloxei32_v_f32m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -3998,7 +3635,6 @@ vfloat32mf2_t testuxei64_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, con
   return vloxei64_v_f32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -4009,7 +3645,6 @@ vfloat32m1_t testuxei64_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const
   return vloxei64_v_f32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -4020,7 +3655,6 @@ vfloat32m2_t testuxei64_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const
   return vloxei64_v_f32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -4031,7 +3665,6 @@ vfloat32m4_t testuxei64_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const f
   return vloxei64_v_f32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -4042,7 +3675,6 @@ vfloat64m1_t testuxei8_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const d
   return vloxei8_v_f64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -4053,7 +3685,6 @@ vfloat64m2_t testuxei8_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const d
   return vloxei8_v_f64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -4064,7 +3695,6 @@ vfloat64m4_t testuxei8_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const d
   return vloxei8_v_f64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
@@ -4075,7 +3705,6 @@ vfloat64m8_t testuxei8_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const do
   return vloxei8_v_f64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -4086,7 +3715,6 @@ vfloat64m1_t testuxei16_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const
   return vloxei16_v_f64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -4097,7 +3725,6 @@ vfloat64m2_t testuxei16_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const
   return vloxei16_v_f64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -4108,7 +3735,6 @@ vfloat64m4_t testuxei16_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const
   return vloxei16_v_f64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
@@ -4119,7 +3745,6 @@ vfloat64m8_t testuxei16_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const d
   return vloxei16_v_f64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -4130,7 +3755,6 @@ vfloat64m1_t testuxei32_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const
   return vloxei32_v_f64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -4141,7 +3765,6 @@ vfloat64m2_t testuxei32_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const
   return vloxei32_v_f64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -4152,7 +3775,6 @@ vfloat64m4_t testuxei32_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const
   return vloxei32_v_f64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
@@ -4163,7 +3785,6 @@ vfloat64m8_t testuxei32_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const d
   return vloxei32_v_f64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -4174,7 +3795,6 @@ vfloat64m1_t testuxei64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const
   return vloxei64_v_f64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -4185,7 +3805,6 @@ vfloat64m2_t testuxei64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const
   return vloxei64_v_f64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -4196,7 +3815,6 @@ vfloat64m4_t testuxei64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const
   return vloxei64_v_f64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlse.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlse.c
index 4e293fe366cf7..d31df14d6b0c4 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlse.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlse.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vlse8_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -17,7 +16,6 @@ vint8mf8_t test_vlse8_v_i8mf8(const int8_t *base, ptrdiff_t bstride,
   return vlse8_v_i8mf8(base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse8_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -29,7 +27,6 @@ vint8mf4_t test_vlse8_v_i8mf4(const int8_t *base, ptrdiff_t bstride,
   return vlse8_v_i8mf4(base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse8_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -41,7 +38,6 @@ vint8mf2_t test_vlse8_v_i8mf2(const int8_t *base, ptrdiff_t bstride,
   return vlse8_v_i8mf2(base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse8_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -52,7 +48,6 @@ vint8m1_t test_vlse8_v_i8m1(const int8_t *base, ptrdiff_t bstride, size_t vl) {
   return vlse8_v_i8m1(base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse8_v_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -63,7 +58,6 @@ vint8m2_t test_vlse8_v_i8m2(const int8_t *base, ptrdiff_t bstride, size_t vl) {
   return vlse8_v_i8m2(base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse8_v_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
@@ -74,7 +68,6 @@ vint8m4_t test_vlse8_v_i8m4(const int8_t *base, ptrdiff_t bstride, size_t vl) {
   return vlse8_v_i8m4(base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse8_v_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
@@ -85,7 +78,6 @@ vint8m8_t test_vlse8_v_i8m8(const int8_t *base, ptrdiff_t bstride, size_t vl) {
   return vlse8_v_i8m8(base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse16_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -97,7 +89,6 @@ vint16mf4_t test_vlse16_v_i16mf4(const int16_t *base, ptrdiff_t bstride,
   return vlse16_v_i16mf4(base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse16_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -109,7 +100,6 @@ vint16mf2_t test_vlse16_v_i16mf2(const int16_t *base, ptrdiff_t bstride,
   return vlse16_v_i16mf2(base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse16_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -121,7 +111,6 @@ vint16m1_t test_vlse16_v_i16m1(const int16_t *base, ptrdiff_t bstride,
   return vlse16_v_i16m1(base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse16_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -133,7 +122,6 @@ vint16m2_t test_vlse16_v_i16m2(const int16_t *base, ptrdiff_t bstride,
   return vlse16_v_i16m2(base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse16_v_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -145,7 +133,6 @@ vint16m4_t test_vlse16_v_i16m4(const int16_t *base, ptrdiff_t bstride,
   return vlse16_v_i16m4(base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse16_v_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
@@ -157,7 +144,6 @@ vint16m8_t test_vlse16_v_i16m8(const int16_t *base, ptrdiff_t bstride,
   return vlse16_v_i16m8(base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse32_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -169,7 +155,6 @@ vint32mf2_t test_vlse32_v_i32mf2(const int32_t *base, ptrdiff_t bstride,
   return vlse32_v_i32mf2(base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse32_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -181,7 +166,6 @@ vint32m1_t test_vlse32_v_i32m1(const int32_t *base, ptrdiff_t bstride,
   return vlse32_v_i32m1(base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse32_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -193,7 +177,6 @@ vint32m2_t test_vlse32_v_i32m2(const int32_t *base, ptrdiff_t bstride,
   return vlse32_v_i32m2(base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse32_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -205,7 +188,6 @@ vint32m4_t test_vlse32_v_i32m4(const int32_t *base, ptrdiff_t bstride,
   return vlse32_v_i32m4(base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse32_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -217,7 +199,6 @@ vint32m8_t test_vlse32_v_i32m8(const int32_t *base, ptrdiff_t bstride,
   return vlse32_v_i32m8(base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse64_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -229,7 +210,6 @@ vint64m1_t test_vlse64_v_i64m1(const int64_t *base, ptrdiff_t bstride,
   return vlse64_v_i64m1(base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse64_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -241,7 +221,6 @@ vint64m2_t test_vlse64_v_i64m2(const int64_t *base, ptrdiff_t bstride,
   return vlse64_v_i64m2(base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse64_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -253,7 +232,6 @@ vint64m4_t test_vlse64_v_i64m4(const int64_t *base, ptrdiff_t bstride,
   return vlse64_v_i64m4(base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse64_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -265,7 +243,6 @@ vint64m8_t test_vlse64_v_i64m8(const int64_t *base, ptrdiff_t bstride,
   return vlse64_v_i64m8(base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse8_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -277,7 +254,6 @@ vuint8mf8_t test_vlse8_v_u8mf8(const uint8_t *base, ptrdiff_t bstride,
   return vlse8_v_u8mf8(base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse8_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -289,7 +265,6 @@ vuint8mf4_t test_vlse8_v_u8mf4(const uint8_t *base, ptrdiff_t bstride,
   return vlse8_v_u8mf4(base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse8_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -301,7 +276,6 @@ vuint8mf2_t test_vlse8_v_u8mf2(const uint8_t *base, ptrdiff_t bstride,
   return vlse8_v_u8mf2(base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse8_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -313,7 +287,6 @@ vuint8m1_t test_vlse8_v_u8m1(const uint8_t *base, ptrdiff_t bstride,
   return vlse8_v_u8m1(base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse8_v_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -325,7 +298,6 @@ vuint8m2_t test_vlse8_v_u8m2(const uint8_t *base, ptrdiff_t bstride,
   return vlse8_v_u8m2(base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse8_v_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
@@ -337,7 +309,6 @@ vuint8m4_t test_vlse8_v_u8m4(const uint8_t *base, ptrdiff_t bstride,
   return vlse8_v_u8m4(base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse8_v_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
@@ -349,7 +320,6 @@ vuint8m8_t test_vlse8_v_u8m8(const uint8_t *base, ptrdiff_t bstride,
   return vlse8_v_u8m8(base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse16_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -361,7 +331,6 @@ vuint16mf4_t test_vlse16_v_u16mf4(const uint16_t *base, ptrdiff_t bstride,
   return vlse16_v_u16mf4(base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse16_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -373,7 +342,6 @@ vuint16mf2_t test_vlse16_v_u16mf2(const uint16_t *base, ptrdiff_t bstride,
   return vlse16_v_u16mf2(base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse16_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -385,7 +353,6 @@ vuint16m1_t test_vlse16_v_u16m1(const uint16_t *base, ptrdiff_t bstride,
   return vlse16_v_u16m1(base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse16_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -397,7 +364,6 @@ vuint16m2_t test_vlse16_v_u16m2(const uint16_t *base, ptrdiff_t bstride,
   return vlse16_v_u16m2(base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse16_v_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -409,7 +375,6 @@ vuint16m4_t test_vlse16_v_u16m4(const uint16_t *base, ptrdiff_t bstride,
   return vlse16_v_u16m4(base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse16_v_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
@@ -421,7 +386,6 @@ vuint16m8_t test_vlse16_v_u16m8(const uint16_t *base, ptrdiff_t bstride,
   return vlse16_v_u16m8(base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse32_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -433,7 +397,6 @@ vuint32mf2_t test_vlse32_v_u32mf2(const uint32_t *base, ptrdiff_t bstride,
   return vlse32_v_u32mf2(base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse32_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -445,7 +408,6 @@ vuint32m1_t test_vlse32_v_u32m1(const uint32_t *base, ptrdiff_t bstride,
   return vlse32_v_u32m1(base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse32_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -457,7 +419,6 @@ vuint32m2_t test_vlse32_v_u32m2(const uint32_t *base, ptrdiff_t bstride,
   return vlse32_v_u32m2(base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse32_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -469,7 +430,6 @@ vuint32m4_t test_vlse32_v_u32m4(const uint32_t *base, ptrdiff_t bstride,
   return vlse32_v_u32m4(base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse32_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -481,7 +441,6 @@ vuint32m8_t test_vlse32_v_u32m8(const uint32_t *base, ptrdiff_t bstride,
   return vlse32_v_u32m8(base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse64_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -493,7 +452,6 @@ vuint64m1_t test_vlse64_v_u64m1(const uint64_t *base, ptrdiff_t bstride,
   return vlse64_v_u64m1(base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse64_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -505,7 +463,6 @@ vuint64m2_t test_vlse64_v_u64m2(const uint64_t *base, ptrdiff_t bstride,
   return vlse64_v_u64m2(base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse64_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -517,7 +474,6 @@ vuint64m4_t test_vlse64_v_u64m4(const uint64_t *base, ptrdiff_t bstride,
   return vlse64_v_u64m4(base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse64_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -529,7 +485,6 @@ vuint64m8_t test_vlse64_v_u64m8(const uint64_t *base, ptrdiff_t bstride,
   return vlse64_v_u64m8(base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse32_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -541,7 +496,6 @@ vfloat32mf2_t test_vlse32_v_f32mf2(const float *base, ptrdiff_t bstride,
   return vlse32_v_f32mf2(base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse32_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -553,7 +507,6 @@ vfloat32m1_t test_vlse32_v_f32m1(const float *base, ptrdiff_t bstride,
   return vlse32_v_f32m1(base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse32_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -565,7 +518,6 @@ vfloat32m2_t test_vlse32_v_f32m2(const float *base, ptrdiff_t bstride,
   return vlse32_v_f32m2(base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse32_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -577,7 +529,6 @@ vfloat32m4_t test_vlse32_v_f32m4(const float *base, ptrdiff_t bstride,
   return vlse32_v_f32m4(base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse32_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
@@ -589,7 +540,6 @@ vfloat32m8_t test_vlse32_v_f32m8(const float *base, ptrdiff_t bstride,
   return vlse32_v_f32m8(base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse64_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -601,7 +551,6 @@ vfloat64m1_t test_vlse64_v_f64m1(const double *base, ptrdiff_t bstride,
   return vlse64_v_f64m1(base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse64_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -613,7 +562,6 @@ vfloat64m2_t test_vlse64_v_f64m2(const double *base, ptrdiff_t bstride,
   return vlse64_v_f64m2(base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse64_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -625,7 +573,6 @@ vfloat64m4_t test_vlse64_v_f64m4(const double *base, ptrdiff_t bstride,
   return vlse64_v_f64m4(base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse64_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
@@ -637,7 +584,6 @@ vfloat64m8_t test_vlse64_v_f64m8(const double *base, ptrdiff_t bstride,
   return vlse64_v_f64m8(base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse8_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -650,7 +596,6 @@ vint8mf8_t test_vlse8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
   return vlse8_v_i8mf8_m(mask, maskedoff, base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse8_v_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -663,7 +608,6 @@ vint8mf4_t test_vlse8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
   return vlse8_v_i8mf4_m(mask, maskedoff, base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse8_v_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -676,7 +620,6 @@ vint8mf2_t test_vlse8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
   return vlse8_v_i8mf2_m(mask, maskedoff, base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse8_v_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -689,7 +632,6 @@ vint8m1_t test_vlse8_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
   return vlse8_v_i8m1_m(mask, maskedoff, base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse8_v_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -702,7 +644,6 @@ vint8m2_t test_vlse8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
   return vlse8_v_i8m2_m(mask, maskedoff, base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse8_v_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
@@ -715,7 +656,6 @@ vint8m4_t test_vlse8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
   return vlse8_v_i8m4_m(mask, maskedoff, base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse8_v_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
@@ -728,7 +668,6 @@ vint8m8_t test_vlse8_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
   return vlse8_v_i8m8_m(mask, maskedoff, base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse16_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -741,7 +680,6 @@ vint16mf4_t test_vlse16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
   return vlse16_v_i16mf4_m(mask, maskedoff, base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse16_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -754,7 +692,6 @@ vint16mf2_t test_vlse16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
   return vlse16_v_i16mf2_m(mask, maskedoff, base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse16_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -767,7 +704,6 @@ vint16m1_t test_vlse16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
   return vlse16_v_i16m1_m(mask, maskedoff, base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse16_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -780,7 +716,6 @@ vint16m2_t test_vlse16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
   return vlse16_v_i16m2_m(mask, maskedoff, base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse16_v_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -793,7 +728,6 @@ vint16m4_t test_vlse16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
   return vlse16_v_i16m4_m(mask, maskedoff, base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse16_v_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
@@ -806,7 +740,6 @@ vint16m8_t test_vlse16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
   return vlse16_v_i16m8_m(mask, maskedoff, base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse32_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -819,7 +752,6 @@ vint32mf2_t test_vlse32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
   return vlse32_v_i32mf2_m(mask, maskedoff, base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse32_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -832,7 +764,6 @@ vint32m1_t test_vlse32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
   return vlse32_v_i32m1_m(mask, maskedoff, base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse32_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -845,7 +776,6 @@ vint32m2_t test_vlse32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
   return vlse32_v_i32m2_m(mask, maskedoff, base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse32_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -858,7 +788,6 @@ vint32m4_t test_vlse32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
   return vlse32_v_i32m4_m(mask, maskedoff, base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse32_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -871,7 +800,6 @@ vint32m8_t test_vlse32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
   return vlse32_v_i32m8_m(mask, maskedoff, base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse64_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -884,7 +812,6 @@ vint64m1_t test_vlse64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
   return vlse64_v_i64m1_m(mask, maskedoff, base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse64_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -897,7 +824,6 @@ vint64m2_t test_vlse64_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
   return vlse64_v_i64m2_m(mask, maskedoff, base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse64_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -910,7 +836,6 @@ vint64m4_t test_vlse64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
   return vlse64_v_i64m4_m(mask, maskedoff, base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse64_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -923,7 +848,6 @@ vint64m8_t test_vlse64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
   return vlse64_v_i64m8_m(mask, maskedoff, base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse8_v_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -936,7 +860,6 @@ vuint8mf8_t test_vlse8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
   return vlse8_v_u8mf8_m(mask, maskedoff, base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse8_v_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -949,7 +872,6 @@ vuint8mf4_t test_vlse8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
   return vlse8_v_u8mf4_m(mask, maskedoff, base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse8_v_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -962,7 +884,6 @@ vuint8mf2_t test_vlse8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
   return vlse8_v_u8mf2_m(mask, maskedoff, base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse8_v_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -975,7 +896,6 @@ vuint8m1_t test_vlse8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
   return vlse8_v_u8m1_m(mask, maskedoff, base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse8_v_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -988,7 +908,6 @@ vuint8m2_t test_vlse8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
   return vlse8_v_u8m2_m(mask, maskedoff, base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse8_v_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
@@ -1001,7 +920,6 @@ vuint8m4_t test_vlse8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
   return vlse8_v_u8m4_m(mask, maskedoff, base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse8_v_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
@@ -1014,7 +932,6 @@ vuint8m8_t test_vlse8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
   return vlse8_v_u8m8_m(mask, maskedoff, base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse16_v_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -1027,7 +944,6 @@ vuint16mf4_t test_vlse16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
   return vlse16_v_u16mf4_m(mask, maskedoff, base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse16_v_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -1040,7 +956,6 @@ vuint16mf2_t test_vlse16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
   return vlse16_v_u16mf2_m(mask, maskedoff, base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse16_v_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -1053,7 +968,6 @@ vuint16m1_t test_vlse16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
   return vlse16_v_u16m1_m(mask, maskedoff, base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse16_v_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -1066,7 +980,6 @@ vuint16m2_t test_vlse16_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
   return vlse16_v_u16m2_m(mask, maskedoff, base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse16_v_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -1079,7 +992,6 @@ vuint16m4_t test_vlse16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
   return vlse16_v_u16m4_m(mask, maskedoff, base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse16_v_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
@@ -1092,7 +1004,6 @@ vuint16m8_t test_vlse16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
   return vlse16_v_u16m8_m(mask, maskedoff, base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse32_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1105,7 +1016,6 @@ vuint32mf2_t test_vlse32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
   return vlse32_v_u32mf2_m(mask, maskedoff, base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse32_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1118,7 +1028,6 @@ vuint32m1_t test_vlse32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
   return vlse32_v_u32m1_m(mask, maskedoff, base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse32_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1131,7 +1040,6 @@ vuint32m2_t test_vlse32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
   return vlse32_v_u32m2_m(mask, maskedoff, base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse32_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1144,7 +1052,6 @@ vuint32m4_t test_vlse32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
   return vlse32_v_u32m4_m(mask, maskedoff, base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse32_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1157,7 +1064,6 @@ vuint32m8_t test_vlse32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
   return vlse32_v_u32m8_m(mask, maskedoff, base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse64_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1170,7 +1076,6 @@ vuint64m1_t test_vlse64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
   return vlse64_v_u64m1_m(mask, maskedoff, base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse64_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1183,7 +1088,6 @@ vuint64m2_t test_vlse64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
   return vlse64_v_u64m2_m(mask, maskedoff, base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse64_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1196,7 +1100,6 @@ vuint64m4_t test_vlse64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
   return vlse64_v_u64m4_m(mask, maskedoff, base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse64_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1209,7 +1112,6 @@ vuint64m8_t test_vlse64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
   return vlse64_v_u64m8_m(mask, maskedoff, base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse32_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -1222,7 +1124,6 @@ vfloat32mf2_t test_vlse32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
   return vlse32_v_f32mf2_m(mask, maskedoff, base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse32_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -1235,7 +1136,6 @@ vfloat32m1_t test_vlse32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
   return vlse32_v_f32m1_m(mask, maskedoff, base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse32_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -1248,7 +1148,6 @@ vfloat32m2_t test_vlse32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
   return vlse32_v_f32m2_m(mask, maskedoff, base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse32_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -1261,7 +1160,6 @@ vfloat32m4_t test_vlse32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
   return vlse32_v_f32m4_m(mask, maskedoff, base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse32_v_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
@@ -1274,7 +1172,6 @@ vfloat32m8_t test_vlse32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
   return vlse32_v_f32m8_m(mask, maskedoff, base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse64_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -1287,7 +1184,6 @@ vfloat64m1_t test_vlse64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
   return vlse64_v_f64m1_m(mask, maskedoff, base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse64_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -1300,7 +1196,6 @@ vfloat64m2_t test_vlse64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
   return vlse64_v_f64m2_m(mask, maskedoff, base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse64_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -1313,7 +1208,6 @@ vfloat64m4_t test_vlse64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
   return vlse64_v_f64m4_m(mask, maskedoff, base, bstride, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse64_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxei.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxei.c
index 8f7175ba112ca..8c71e0ec82db9 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxei.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxei.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -16,7 +15,6 @@ vint8mf8_t testuxei8_v_i8mf8(const int8_t *base, vuint8mf8_t bindex, size_t vl)
   return vluxei8_v_i8mf8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -27,7 +25,6 @@ vint8mf4_t testuxei8_v_i8mf4(const int8_t *base, vuint8mf4_t bindex, size_t vl)
   return vluxei8_v_i8mf4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -38,7 +35,6 @@ vint8mf2_t testuxei8_v_i8mf2(const int8_t *base, vuint8mf2_t bindex, size_t vl)
   return vluxei8_v_i8mf2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -49,7 +45,6 @@ vint8m1_t testuxei8_v_i8m1(const int8_t *base, vuint8m1_t bindex, size_t vl) {
   return vluxei8_v_i8m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -60,7 +55,6 @@ vint8m2_t testuxei8_v_i8m2(const int8_t *base, vuint8m2_t bindex, size_t vl) {
   return vluxei8_v_i8m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
@@ -71,7 +65,6 @@ vint8m4_t testuxei8_v_i8m4(const int8_t *base, vuint8m4_t bindex, size_t vl) {
   return vluxei8_v_i8m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
@@ -82,7 +75,6 @@ vint8m8_t testuxei8_v_i8m8(const int8_t *base, vuint8m8_t bindex, size_t vl) {
   return vluxei8_v_i8m8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -93,7 +85,6 @@ vint8mf8_t testuxei16_v_i8mf8(const int8_t *base, vuint16mf4_t bindex, size_t vl
   return vluxei16_v_i8mf8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -104,7 +95,6 @@ vint8mf4_t testuxei16_v_i8mf4(const int8_t *base, vuint16mf2_t bindex, size_t vl
   return vluxei16_v_i8mf4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -115,7 +105,6 @@ vint8mf2_t testuxei16_v_i8mf2(const int8_t *base, vuint16m1_t bindex, size_t vl)
   return vluxei16_v_i8mf2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -126,7 +115,6 @@ vint8m1_t testuxei16_v_i8m1(const int8_t *base, vuint16m2_t bindex, size_t vl) {
   return vluxei16_v_i8m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -137,7 +125,6 @@ vint8m2_t testuxei16_v_i8m2(const int8_t *base, vuint16m4_t bindex, size_t vl) {
   return vluxei16_v_i8m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
@@ -148,7 +135,6 @@ vint8m4_t testuxei16_v_i8m4(const int8_t *base, vuint16m8_t bindex, size_t vl) {
   return vluxei16_v_i8m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -159,7 +145,6 @@ vint8mf8_t testuxei32_v_i8mf8(const int8_t *base, vuint32mf2_t bindex, size_t vl
   return vluxei32_v_i8mf8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -170,7 +155,6 @@ vint8mf4_t testuxei32_v_i8mf4(const int8_t *base, vuint32m1_t bindex, size_t vl)
   return vluxei32_v_i8mf4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -181,7 +165,6 @@ vint8mf2_t testuxei32_v_i8mf2(const int8_t *base, vuint32m2_t bindex, size_t vl)
   return vluxei32_v_i8mf2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -192,7 +175,6 @@ vint8m1_t testuxei32_v_i8m1(const int8_t *base, vuint32m4_t bindex, size_t vl) {
   return vluxei32_v_i8m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -203,7 +185,6 @@ vint8m2_t testuxei32_v_i8m2(const int8_t *base, vuint32m8_t bindex, size_t vl) {
   return vluxei32_v_i8m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -214,7 +195,6 @@ vint8mf8_t testuxei64_v_i8mf8(const int8_t *base, vuint64m1_t bindex, size_t vl)
   return vluxei64_v_i8mf8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -225,7 +205,6 @@ vint8mf4_t testuxei64_v_i8mf4(const int8_t *base, vuint64m2_t bindex, size_t vl)
   return vluxei64_v_i8mf4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -236,7 +215,6 @@ vint8mf2_t testuxei64_v_i8mf2(const int8_t *base, vuint64m4_t bindex, size_t vl)
   return vluxei64_v_i8mf2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -247,7 +225,6 @@ vint8m1_t testuxei64_v_i8m1(const int8_t *base, vuint64m8_t bindex, size_t vl) {
   return vluxei64_v_i8m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -258,7 +235,6 @@ vint16mf4_t testuxei8_v_i16mf4(const int16_t *base, vuint8mf8_t bindex, size_t v
   return vluxei8_v_i16mf4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -269,7 +245,6 @@ vint16mf2_t testuxei8_v_i16mf2(const int16_t *base, vuint8mf4_t bindex, size_t v
   return vluxei8_v_i16mf2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -280,7 +255,6 @@ vint16m1_t testuxei8_v_i16m1(const int16_t *base, vuint8mf2_t bindex, size_t vl)
   return vluxei8_v_i16m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -291,7 +265,6 @@ vint16m2_t testuxei8_v_i16m2(const int16_t *base, vuint8m1_t bindex, size_t vl)
   return vluxei8_v_i16m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -302,7 +275,6 @@ vint16m4_t testuxei8_v_i16m4(const int16_t *base, vuint8m2_t bindex, size_t vl)
   return vluxei8_v_i16m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
@@ -313,7 +285,6 @@ vint16m8_t testuxei8_v_i16m8(const int16_t *base, vuint8m4_t bindex, size_t vl)
   return vluxei8_v_i16m8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -324,7 +295,6 @@ vint16mf4_t testuxei16_v_i16mf4(const int16_t *base, vuint16mf4_t bindex, size_t
   return vluxei16_v_i16mf4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -335,7 +305,6 @@ vint16mf2_t testuxei16_v_i16mf2(const int16_t *base, vuint16mf2_t bindex, size_t
   return vluxei16_v_i16mf2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -346,7 +315,6 @@ vint16m1_t testuxei16_v_i16m1(const int16_t *base, vuint16m1_t bindex, size_t vl
   return vluxei16_v_i16m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -357,7 +325,6 @@ vint16m2_t testuxei16_v_i16m2(const int16_t *base, vuint16m2_t bindex, size_t vl
   return vluxei16_v_i16m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -368,7 +335,6 @@ vint16m4_t testuxei16_v_i16m4(const int16_t *base, vuint16m4_t bindex, size_t vl
   return vluxei16_v_i16m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
@@ -379,7 +345,6 @@ vint16m8_t testuxei16_v_i16m8(const int16_t *base, vuint16m8_t bindex, size_t vl
   return vluxei16_v_i16m8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -390,7 +355,6 @@ vint16mf4_t testuxei32_v_i16mf4(const int16_t *base, vuint32mf2_t bindex, size_t
   return vluxei32_v_i16mf4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -401,7 +365,6 @@ vint16mf2_t testuxei32_v_i16mf2(const int16_t *base, vuint32m1_t bindex, size_t
   return vluxei32_v_i16mf2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -412,7 +375,6 @@ vint16m1_t testuxei32_v_i16m1(const int16_t *base, vuint32m2_t bindex, size_t vl
   return vluxei32_v_i16m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -423,7 +385,6 @@ vint16m2_t testuxei32_v_i16m2(const int16_t *base, vuint32m4_t bindex, size_t vl
   return vluxei32_v_i16m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -434,7 +395,6 @@ vint16m4_t testuxei32_v_i16m4(const int16_t *base, vuint32m8_t bindex, size_t vl
   return vluxei32_v_i16m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -445,7 +405,6 @@ vint16mf4_t testuxei64_v_i16mf4(const int16_t *base, vuint64m1_t bindex, size_t
   return vluxei64_v_i16mf4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -456,7 +415,6 @@ vint16mf2_t testuxei64_v_i16mf2(const int16_t *base, vuint64m2_t bindex, size_t
   return vluxei64_v_i16mf2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -467,7 +425,6 @@ vint16m1_t testuxei64_v_i16m1(const int16_t *base, vuint64m4_t bindex, size_t vl
   return vluxei64_v_i16m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -478,7 +435,6 @@ vint16m2_t testuxei64_v_i16m2(const int16_t *base, vuint64m8_t bindex, size_t vl
   return vluxei64_v_i16m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -489,7 +445,6 @@ vint32mf2_t testuxei8_v_i32mf2(const int32_t *base, vuint8mf8_t bindex, size_t v
   return vluxei8_v_i32mf2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -500,7 +455,6 @@ vint32m1_t testuxei8_v_i32m1(const int32_t *base, vuint8mf4_t bindex, size_t vl)
   return vluxei8_v_i32m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -511,7 +465,6 @@ vint32m2_t testuxei8_v_i32m2(const int32_t *base, vuint8mf2_t bindex, size_t vl)
   return vluxei8_v_i32m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -522,7 +475,6 @@ vint32m4_t testuxei8_v_i32m4(const int32_t *base, vuint8m1_t bindex, size_t vl)
   return vluxei8_v_i32m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -533,7 +485,6 @@ vint32m8_t testuxei8_v_i32m8(const int32_t *base, vuint8m2_t bindex, size_t vl)
   return vluxei8_v_i32m8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -544,7 +495,6 @@ vint32mf2_t testuxei16_v_i32mf2(const int32_t *base, vuint16mf4_t bindex, size_t
   return vluxei16_v_i32mf2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -555,7 +505,6 @@ vint32m1_t testuxei16_v_i32m1(const int32_t *base, vuint16mf2_t bindex, size_t v
   return vluxei16_v_i32m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -566,7 +515,6 @@ vint32m2_t testuxei16_v_i32m2(const int32_t *base, vuint16m1_t bindex, size_t vl
   return vluxei16_v_i32m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -577,7 +525,6 @@ vint32m4_t testuxei16_v_i32m4(const int32_t *base, vuint16m2_t bindex, size_t vl
   return vluxei16_v_i32m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -588,7 +535,6 @@ vint32m8_t testuxei16_v_i32m8(const int32_t *base, vuint16m4_t bindex, size_t vl
   return vluxei16_v_i32m8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -599,7 +545,6 @@ vint32mf2_t testuxei32_v_i32mf2(const int32_t *base, vuint32mf2_t bindex, size_t
   return vluxei32_v_i32mf2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -610,7 +555,6 @@ vint32m1_t testuxei32_v_i32m1(const int32_t *base, vuint32m1_t bindex, size_t vl
   return vluxei32_v_i32m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -621,7 +565,6 @@ vint32m2_t testuxei32_v_i32m2(const int32_t *base, vuint32m2_t bindex, size_t vl
   return vluxei32_v_i32m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -632,7 +575,6 @@ vint32m4_t testuxei32_v_i32m4(const int32_t *base, vuint32m4_t bindex, size_t vl
   return vluxei32_v_i32m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -643,7 +585,6 @@ vint32m8_t testuxei32_v_i32m8(const int32_t *base, vuint32m8_t bindex, size_t vl
   return vluxei32_v_i32m8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -654,7 +595,6 @@ vint32mf2_t testuxei64_v_i32mf2(const int32_t *base, vuint64m1_t bindex, size_t
   return vluxei64_v_i32mf2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -665,7 +605,6 @@ vint32m1_t testuxei64_v_i32m1(const int32_t *base, vuint64m2_t bindex, size_t vl
   return vluxei64_v_i32m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -676,7 +615,6 @@ vint32m2_t testuxei64_v_i32m2(const int32_t *base, vuint64m4_t bindex, size_t vl
   return vluxei64_v_i32m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -687,7 +625,6 @@ vint32m4_t testuxei64_v_i32m4(const int32_t *base, vuint64m8_t bindex, size_t vl
   return vluxei64_v_i32m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -698,7 +635,6 @@ vint64m1_t testuxei8_v_i64m1(const int64_t *base, vuint8mf8_t bindex, size_t vl)
   return vluxei8_v_i64m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -709,7 +645,6 @@ vint64m2_t testuxei8_v_i64m2(const int64_t *base, vuint8mf4_t bindex, size_t vl)
   return vluxei8_v_i64m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -720,7 +655,6 @@ vint64m4_t testuxei8_v_i64m4(const int64_t *base, vuint8mf2_t bindex, size_t vl)
   return vluxei8_v_i64m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -731,7 +665,6 @@ vint64m8_t testuxei8_v_i64m8(const int64_t *base, vuint8m1_t bindex, size_t vl)
   return vluxei8_v_i64m8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -742,7 +675,6 @@ vint64m1_t testuxei16_v_i64m1(const int64_t *base, vuint16mf4_t bindex, size_t v
   return vluxei16_v_i64m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -753,7 +685,6 @@ vint64m2_t testuxei16_v_i64m2(const int64_t *base, vuint16mf2_t bindex, size_t v
   return vluxei16_v_i64m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -764,7 +695,6 @@ vint64m4_t testuxei16_v_i64m4(const int64_t *base, vuint16m1_t bindex, size_t vl
   return vluxei16_v_i64m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -775,7 +705,6 @@ vint64m8_t testuxei16_v_i64m8(const int64_t *base, vuint16m2_t bindex, size_t vl
   return vluxei16_v_i64m8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -786,7 +715,6 @@ vint64m1_t testuxei32_v_i64m1(const int64_t *base, vuint32mf2_t bindex, size_t v
   return vluxei32_v_i64m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -797,7 +725,6 @@ vint64m2_t testuxei32_v_i64m2(const int64_t *base, vuint32m1_t bindex, size_t vl
   return vluxei32_v_i64m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -808,7 +735,6 @@ vint64m4_t testuxei32_v_i64m4(const int64_t *base, vuint32m2_t bindex, size_t vl
   return vluxei32_v_i64m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -819,7 +745,6 @@ vint64m8_t testuxei32_v_i64m8(const int64_t *base, vuint32m4_t bindex, size_t vl
   return vluxei32_v_i64m8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -830,7 +755,6 @@ vint64m1_t testuxei64_v_i64m1(const int64_t *base, vuint64m1_t bindex, size_t vl
   return vluxei64_v_i64m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -841,7 +765,6 @@ vint64m2_t testuxei64_v_i64m2(const int64_t *base, vuint64m2_t bindex, size_t vl
   return vluxei64_v_i64m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -852,7 +775,6 @@ vint64m4_t testuxei64_v_i64m4(const int64_t *base, vuint64m4_t bindex, size_t vl
   return vluxei64_v_i64m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -863,7 +785,6 @@ vint64m8_t testuxei64_v_i64m8(const int64_t *base, vuint64m8_t bindex, size_t vl
   return vluxei64_v_i64m8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -874,7 +795,6 @@ vuint8mf8_t testuxei8_v_u8mf8(const uint8_t *base, vuint8mf8_t bindex, size_t vl
   return vluxei8_v_u8mf8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -885,7 +805,6 @@ vuint8mf4_t testuxei8_v_u8mf4(const uint8_t *base, vuint8mf4_t bindex, size_t vl
   return vluxei8_v_u8mf4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -896,7 +815,6 @@ vuint8mf2_t testuxei8_v_u8mf2(const uint8_t *base, vuint8mf2_t bindex, size_t vl
   return vluxei8_v_u8mf2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -907,7 +825,6 @@ vuint8m1_t testuxei8_v_u8m1(const uint8_t *base, vuint8m1_t bindex, size_t vl) {
   return vluxei8_v_u8m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -918,7 +835,6 @@ vuint8m2_t testuxei8_v_u8m2(const uint8_t *base, vuint8m2_t bindex, size_t vl) {
   return vluxei8_v_u8m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
@@ -929,7 +845,6 @@ vuint8m4_t testuxei8_v_u8m4(const uint8_t *base, vuint8m4_t bindex, size_t vl) {
   return vluxei8_v_u8m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
@@ -940,7 +855,6 @@ vuint8m8_t testuxei8_v_u8m8(const uint8_t *base, vuint8m8_t bindex, size_t vl) {
   return vluxei8_v_u8m8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -951,7 +865,6 @@ vuint8mf8_t testuxei16_v_u8mf8(const uint8_t *base, vuint16mf4_t bindex, size_t
   return vluxei16_v_u8mf8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -962,7 +875,6 @@ vuint8mf4_t testuxei16_v_u8mf4(const uint8_t *base, vuint16mf2_t bindex, size_t
   return vluxei16_v_u8mf4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -973,7 +885,6 @@ vuint8mf2_t testuxei16_v_u8mf2(const uint8_t *base, vuint16m1_t bindex, size_t v
   return vluxei16_v_u8mf2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -984,7 +895,6 @@ vuint8m1_t testuxei16_v_u8m1(const uint8_t *base, vuint16m2_t bindex, size_t vl)
   return vluxei16_v_u8m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -995,7 +905,6 @@ vuint8m2_t testuxei16_v_u8m2(const uint8_t *base, vuint16m4_t bindex, size_t vl)
   return vluxei16_v_u8m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
@@ -1006,7 +915,6 @@ vuint8m4_t testuxei16_v_u8m4(const uint8_t *base, vuint16m8_t bindex, size_t vl)
   return vluxei16_v_u8m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -1017,7 +925,6 @@ vuint8mf8_t testuxei32_v_u8mf8(const uint8_t *base, vuint32mf2_t bindex, size_t
   return vluxei32_v_u8mf8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -1028,7 +935,6 @@ vuint8mf4_t testuxei32_v_u8mf4(const uint8_t *base, vuint32m1_t bindex, size_t v
   return vluxei32_v_u8mf4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -1039,7 +945,6 @@ vuint8mf2_t testuxei32_v_u8mf2(const uint8_t *base, vuint32m2_t bindex, size_t v
   return vluxei32_v_u8mf2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -1050,7 +955,6 @@ vuint8m1_t testuxei32_v_u8m1(const uint8_t *base, vuint32m4_t bindex, size_t vl)
   return vluxei32_v_u8m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -1061,7 +965,6 @@ vuint8m2_t testuxei32_v_u8m2(const uint8_t *base, vuint32m8_t bindex, size_t vl)
   return vluxei32_v_u8m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -1072,7 +975,6 @@ vuint8mf8_t testuxei64_v_u8mf8(const uint8_t *base, vuint64m1_t bindex, size_t v
   return vluxei64_v_u8mf8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -1083,7 +985,6 @@ vuint8mf4_t testuxei64_v_u8mf4(const uint8_t *base, vuint64m2_t bindex, size_t v
   return vluxei64_v_u8mf4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -1094,7 +995,6 @@ vuint8mf2_t testuxei64_v_u8mf2(const uint8_t *base, vuint64m4_t bindex, size_t v
   return vluxei64_v_u8mf2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -1105,7 +1005,6 @@ vuint8m1_t testuxei64_v_u8m1(const uint8_t *base, vuint64m8_t bindex, size_t vl)
   return vluxei64_v_u8m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -1116,7 +1015,6 @@ vuint16mf4_t testuxei8_v_u16mf4(const uint16_t *base, vuint8mf8_t bindex, size_t
   return vluxei8_v_u16mf4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -1127,7 +1025,6 @@ vuint16mf2_t testuxei8_v_u16mf2(const uint16_t *base, vuint8mf4_t bindex, size_t
   return vluxei8_v_u16mf2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -1138,7 +1035,6 @@ vuint16m1_t testuxei8_v_u16m1(const uint16_t *base, vuint8mf2_t bindex, size_t v
   return vluxei8_v_u16m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -1149,7 +1045,6 @@ vuint16m2_t testuxei8_v_u16m2(const uint16_t *base, vuint8m1_t bindex, size_t vl
   return vluxei8_v_u16m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -1160,7 +1055,6 @@ vuint16m4_t testuxei8_v_u16m4(const uint16_t *base, vuint8m2_t bindex, size_t vl
   return vluxei8_v_u16m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
@@ -1171,7 +1065,6 @@ vuint16m8_t testuxei8_v_u16m8(const uint16_t *base, vuint8m4_t bindex, size_t vl
   return vluxei8_v_u16m8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -1182,7 +1075,6 @@ vuint16mf4_t testuxei16_v_u16mf4(const uint16_t *base, vuint16mf4_t bindex, size
   return vluxei16_v_u16mf4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -1193,7 +1085,6 @@ vuint16mf2_t testuxei16_v_u16mf2(const uint16_t *base, vuint16mf2_t bindex, size
   return vluxei16_v_u16mf2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -1204,7 +1095,6 @@ vuint16m1_t testuxei16_v_u16m1(const uint16_t *base, vuint16m1_t bindex, size_t
   return vluxei16_v_u16m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -1215,7 +1105,6 @@ vuint16m2_t testuxei16_v_u16m2(const uint16_t *base, vuint16m2_t bindex, size_t
   return vluxei16_v_u16m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -1226,7 +1115,6 @@ vuint16m4_t testuxei16_v_u16m4(const uint16_t *base, vuint16m4_t bindex, size_t
   return vluxei16_v_u16m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
@@ -1237,7 +1125,6 @@ vuint16m8_t testuxei16_v_u16m8(const uint16_t *base, vuint16m8_t bindex, size_t
   return vluxei16_v_u16m8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -1248,7 +1135,6 @@ vuint16mf4_t testuxei32_v_u16mf4(const uint16_t *base, vuint32mf2_t bindex, size
   return vluxei32_v_u16mf4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -1259,7 +1145,6 @@ vuint16mf2_t testuxei32_v_u16mf2(const uint16_t *base, vuint32m1_t bindex, size_
   return vluxei32_v_u16mf2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -1270,7 +1155,6 @@ vuint16m1_t testuxei32_v_u16m1(const uint16_t *base, vuint32m2_t bindex, size_t
   return vluxei32_v_u16m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -1281,7 +1165,6 @@ vuint16m2_t testuxei32_v_u16m2(const uint16_t *base, vuint32m4_t bindex, size_t
   return vluxei32_v_u16m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -1292,7 +1175,6 @@ vuint16m4_t testuxei32_v_u16m4(const uint16_t *base, vuint32m8_t bindex, size_t
   return vluxei32_v_u16m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -1303,7 +1185,6 @@ vuint16mf4_t testuxei64_v_u16mf4(const uint16_t *base, vuint64m1_t bindex, size_
   return vluxei64_v_u16mf4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -1314,7 +1195,6 @@ vuint16mf2_t testuxei64_v_u16mf2(const uint16_t *base, vuint64m2_t bindex, size_
   return vluxei64_v_u16mf2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -1325,7 +1205,6 @@ vuint16m1_t testuxei64_v_u16m1(const uint16_t *base, vuint64m4_t bindex, size_t
   return vluxei64_v_u16m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -1336,7 +1215,6 @@ vuint16m2_t testuxei64_v_u16m2(const uint16_t *base, vuint64m8_t bindex, size_t
   return vluxei64_v_u16m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1347,7 +1225,6 @@ vuint32mf2_t testuxei8_v_u32mf2(const uint32_t *base, vuint8mf8_t bindex, size_t
   return vluxei8_v_u32mf2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1358,7 +1235,6 @@ vuint32m1_t testuxei8_v_u32m1(const uint32_t *base, vuint8mf4_t bindex, size_t v
   return vluxei8_v_u32m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1369,7 +1245,6 @@ vuint32m2_t testuxei8_v_u32m2(const uint32_t *base, vuint8mf2_t bindex, size_t v
   return vluxei8_v_u32m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1380,7 +1255,6 @@ vuint32m4_t testuxei8_v_u32m4(const uint32_t *base, vuint8m1_t bindex, size_t vl
   return vluxei8_v_u32m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1391,7 +1265,6 @@ vuint32m8_t testuxei8_v_u32m8(const uint32_t *base, vuint8m2_t bindex, size_t vl
   return vluxei8_v_u32m8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1402,7 +1275,6 @@ vuint32mf2_t testuxei16_v_u32mf2(const uint32_t *base, vuint16mf4_t bindex, size
   return vluxei16_v_u32mf2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1413,7 +1285,6 @@ vuint32m1_t testuxei16_v_u32m1(const uint32_t *base, vuint16mf2_t bindex, size_t
   return vluxei16_v_u32m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1424,7 +1295,6 @@ vuint32m2_t testuxei16_v_u32m2(const uint32_t *base, vuint16m1_t bindex, size_t
   return vluxei16_v_u32m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1435,7 +1305,6 @@ vuint32m4_t testuxei16_v_u32m4(const uint32_t *base, vuint16m2_t bindex, size_t
   return vluxei16_v_u32m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1446,7 +1315,6 @@ vuint32m8_t testuxei16_v_u32m8(const uint32_t *base, vuint16m4_t bindex, size_t
   return vluxei16_v_u32m8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1457,7 +1325,6 @@ vuint32mf2_t testuxei32_v_u32mf2(const uint32_t *base, vuint32mf2_t bindex, size
   return vluxei32_v_u32mf2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1468,7 +1335,6 @@ vuint32m1_t testuxei32_v_u32m1(const uint32_t *base, vuint32m1_t bindex, size_t
   return vluxei32_v_u32m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1479,7 +1345,6 @@ vuint32m2_t testuxei32_v_u32m2(const uint32_t *base, vuint32m2_t bindex, size_t
   return vluxei32_v_u32m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1490,7 +1355,6 @@ vuint32m4_t testuxei32_v_u32m4(const uint32_t *base, vuint32m4_t bindex, size_t
   return vluxei32_v_u32m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1501,7 +1365,6 @@ vuint32m8_t testuxei32_v_u32m8(const uint32_t *base, vuint32m8_t bindex, size_t
   return vluxei32_v_u32m8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1512,7 +1375,6 @@ vuint32mf2_t testuxei64_v_u32mf2(const uint32_t *base, vuint64m1_t bindex, size_
   return vluxei64_v_u32mf2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1523,7 +1385,6 @@ vuint32m1_t testuxei64_v_u32m1(const uint32_t *base, vuint64m2_t bindex, size_t
   return vluxei64_v_u32m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1534,7 +1395,6 @@ vuint32m2_t testuxei64_v_u32m2(const uint32_t *base, vuint64m4_t bindex, size_t
   return vluxei64_v_u32m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1545,7 +1405,6 @@ vuint32m4_t testuxei64_v_u32m4(const uint32_t *base, vuint64m8_t bindex, size_t
   return vluxei64_v_u32m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1556,7 +1415,6 @@ vuint64m1_t testuxei8_v_u64m1(const uint64_t *base, vuint8mf8_t bindex, size_t v
   return vluxei8_v_u64m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1567,7 +1425,6 @@ vuint64m2_t testuxei8_v_u64m2(const uint64_t *base, vuint8mf4_t bindex, size_t v
   return vluxei8_v_u64m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1578,7 +1435,6 @@ vuint64m4_t testuxei8_v_u64m4(const uint64_t *base, vuint8mf2_t bindex, size_t v
   return vluxei8_v_u64m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1589,7 +1445,6 @@ vuint64m8_t testuxei8_v_u64m8(const uint64_t *base, vuint8m1_t bindex, size_t vl
   return vluxei8_v_u64m8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1600,7 +1455,6 @@ vuint64m1_t testuxei16_v_u64m1(const uint64_t *base, vuint16mf4_t bindex, size_t
   return vluxei16_v_u64m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1611,7 +1465,6 @@ vuint64m2_t testuxei16_v_u64m2(const uint64_t *base, vuint16mf2_t bindex, size_t
   return vluxei16_v_u64m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1622,7 +1475,6 @@ vuint64m4_t testuxei16_v_u64m4(const uint64_t *base, vuint16m1_t bindex, size_t
   return vluxei16_v_u64m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1633,7 +1485,6 @@ vuint64m8_t testuxei16_v_u64m8(const uint64_t *base, vuint16m2_t bindex, size_t
   return vluxei16_v_u64m8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1644,7 +1495,6 @@ vuint64m1_t testuxei32_v_u64m1(const uint64_t *base, vuint32mf2_t bindex, size_t
   return vluxei32_v_u64m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1655,7 +1505,6 @@ vuint64m2_t testuxei32_v_u64m2(const uint64_t *base, vuint32m1_t bindex, size_t
   return vluxei32_v_u64m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1666,7 +1515,6 @@ vuint64m4_t testuxei32_v_u64m4(const uint64_t *base, vuint32m2_t bindex, size_t
   return vluxei32_v_u64m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1677,7 +1525,6 @@ vuint64m8_t testuxei32_v_u64m8(const uint64_t *base, vuint32m4_t bindex, size_t
   return vluxei32_v_u64m8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1688,7 +1535,6 @@ vuint64m1_t testuxei64_v_u64m1(const uint64_t *base, vuint64m1_t bindex, size_t
   return vluxei64_v_u64m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1699,7 +1545,6 @@ vuint64m2_t testuxei64_v_u64m2(const uint64_t *base, vuint64m2_t bindex, size_t
   return vluxei64_v_u64m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1710,7 +1555,6 @@ vuint64m4_t testuxei64_v_u64m4(const uint64_t *base, vuint64m4_t bindex, size_t
   return vluxei64_v_u64m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1721,7 +1565,6 @@ vuint64m8_t testuxei64_v_u64m8(const uint64_t *base, vuint64m8_t bindex, size_t
   return vluxei64_v_u64m8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -1732,7 +1575,6 @@ vfloat32mf2_t testuxei8_v_f32mf2(const float *base, vuint8mf8_t bindex, size_t v
   return vluxei8_v_f32mf2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -1743,7 +1585,6 @@ vfloat32m1_t testuxei8_v_f32m1(const float *base, vuint8mf4_t bindex, size_t vl)
   return vluxei8_v_f32m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -1754,7 +1595,6 @@ vfloat32m2_t testuxei8_v_f32m2(const float *base, vuint8mf2_t bindex, size_t vl)
   return vluxei8_v_f32m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -1765,7 +1605,6 @@ vfloat32m4_t testuxei8_v_f32m4(const float *base, vuint8m1_t bindex, size_t vl)
   return vluxei8_v_f32m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
@@ -1776,7 +1615,6 @@ vfloat32m8_t testuxei8_v_f32m8(const float *base, vuint8m2_t bindex, size_t vl)
   return vluxei8_v_f32m8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -1787,7 +1625,6 @@ vfloat32mf2_t testuxei16_v_f32mf2(const float *base, vuint16mf4_t bindex, size_t
   return vluxei16_v_f32mf2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -1798,7 +1635,6 @@ vfloat32m1_t testuxei16_v_f32m1(const float *base, vuint16mf2_t bindex, size_t v
   return vluxei16_v_f32m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -1809,7 +1645,6 @@ vfloat32m2_t testuxei16_v_f32m2(const float *base, vuint16m1_t bindex, size_t vl
   return vluxei16_v_f32m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -1820,7 +1655,6 @@ vfloat32m4_t testuxei16_v_f32m4(const float *base, vuint16m2_t bindex, size_t vl
   return vluxei16_v_f32m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
@@ -1831,7 +1665,6 @@ vfloat32m8_t testuxei16_v_f32m8(const float *base, vuint16m4_t bindex, size_t vl
   return vluxei16_v_f32m8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -1842,7 +1675,6 @@ vfloat32mf2_t testuxei32_v_f32mf2(const float *base, vuint32mf2_t bindex, size_t
   return vluxei32_v_f32mf2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -1853,7 +1685,6 @@ vfloat32m1_t testuxei32_v_f32m1(const float *base, vuint32m1_t bindex, size_t vl
   return vluxei32_v_f32m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -1864,7 +1695,6 @@ vfloat32m2_t testuxei32_v_f32m2(const float *base, vuint32m2_t bindex, size_t vl
   return vluxei32_v_f32m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -1875,7 +1705,6 @@ vfloat32m4_t testuxei32_v_f32m4(const float *base, vuint32m4_t bindex, size_t vl
   return vluxei32_v_f32m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
@@ -1886,7 +1715,6 @@ vfloat32m8_t testuxei32_v_f32m8(const float *base, vuint32m8_t bindex, size_t vl
   return vluxei32_v_f32m8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -1897,7 +1725,6 @@ vfloat32mf2_t testuxei64_v_f32mf2(const float *base, vuint64m1_t bindex, size_t
   return vluxei64_v_f32mf2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -1908,7 +1735,6 @@ vfloat32m1_t testuxei64_v_f32m1(const float *base, vuint64m2_t bindex, size_t vl
   return vluxei64_v_f32m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -1919,7 +1745,6 @@ vfloat32m2_t testuxei64_v_f32m2(const float *base, vuint64m4_t bindex, size_t vl
   return vluxei64_v_f32m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -1930,7 +1755,6 @@ vfloat32m4_t testuxei64_v_f32m4(const float *base, vuint64m8_t bindex, size_t vl
   return vluxei64_v_f32m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -1941,7 +1765,6 @@ vfloat64m1_t testuxei8_v_f64m1(const double *base, vuint8mf8_t bindex, size_t vl
   return vluxei8_v_f64m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -1952,7 +1775,6 @@ vfloat64m2_t testuxei8_v_f64m2(const double *base, vuint8mf4_t bindex, size_t vl
   return vluxei8_v_f64m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -1963,7 +1785,6 @@ vfloat64m4_t testuxei8_v_f64m4(const double *base, vuint8mf2_t bindex, size_t vl
   return vluxei8_v_f64m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
@@ -1974,7 +1795,6 @@ vfloat64m8_t testuxei8_v_f64m8(const double *base, vuint8m1_t bindex, size_t vl)
   return vluxei8_v_f64m8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -1985,7 +1805,6 @@ vfloat64m1_t testuxei16_v_f64m1(const double *base, vuint16mf4_t bindex, size_t
   return vluxei16_v_f64m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -1996,7 +1815,6 @@ vfloat64m2_t testuxei16_v_f64m2(const double *base, vuint16mf2_t bindex, size_t
   return vluxei16_v_f64m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -2007,7 +1825,6 @@ vfloat64m4_t testuxei16_v_f64m4(const double *base, vuint16m1_t bindex, size_t v
   return vluxei16_v_f64m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
@@ -2018,7 +1835,6 @@ vfloat64m8_t testuxei16_v_f64m8(const double *base, vuint16m2_t bindex, size_t v
   return vluxei16_v_f64m8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -2029,7 +1845,6 @@ vfloat64m1_t testuxei32_v_f64m1(const double *base, vuint32mf2_t bindex, size_t
   return vluxei32_v_f64m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -2040,7 +1855,6 @@ vfloat64m2_t testuxei32_v_f64m2(const double *base, vuint32m1_t bindex, size_t v
   return vluxei32_v_f64m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -2051,7 +1865,6 @@ vfloat64m4_t testuxei32_v_f64m4(const double *base, vuint32m2_t bindex, size_t v
   return vluxei32_v_f64m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
@@ -2062,7 +1875,6 @@ vfloat64m8_t testuxei32_v_f64m8(const double *base, vuint32m4_t bindex, size_t v
   return vluxei32_v_f64m8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -2073,7 +1885,6 @@ vfloat64m1_t testuxei64_v_f64m1(const double *base, vuint64m1_t bindex, size_t v
   return vluxei64_v_f64m1(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -2084,7 +1895,6 @@ vfloat64m2_t testuxei64_v_f64m2(const double *base, vuint64m2_t bindex, size_t v
   return vluxei64_v_f64m2(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -2095,7 +1905,6 @@ vfloat64m4_t testuxei64_v_f64m4(const double *base, vuint64m4_t bindex, size_t v
   return vluxei64_v_f64m4(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
@@ -2106,7 +1915,6 @@ vfloat64m8_t testuxei64_v_f64m8(const double *base, vuint64m8_t bindex, size_t v
   return vluxei64_v_f64m8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -2117,7 +1925,6 @@ vint8mf8_t testuxei8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_
   return vluxei8_v_i8mf8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -2128,7 +1935,6 @@ vint8mf4_t testuxei8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_
   return vluxei8_v_i8mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -2139,7 +1945,6 @@ vint8mf2_t testuxei8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_
   return vluxei8_v_i8mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -2150,7 +1955,6 @@ vint8m1_t testuxei8_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *b
   return vluxei8_v_i8m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -2161,7 +1965,6 @@ vint8m2_t testuxei8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *b
   return vluxei8_v_i8m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
@@ -2172,7 +1975,6 @@ vint8m4_t testuxei8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *b
   return vluxei8_v_i8m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
@@ -2183,7 +1985,6 @@ vint8m8_t testuxei8_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, const int8_t *b
   return vluxei8_v_i8m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -2194,7 +1995,6 @@ vint8mf8_t testuxei16_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8
   return vluxei16_v_i8mf8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -2205,7 +2005,6 @@ vint8mf4_t testuxei16_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8
   return vluxei16_v_i8mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -2216,7 +2015,6 @@ vint8mf2_t testuxei16_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8
   return vluxei16_v_i8mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -2227,7 +2025,6 @@ vint8m1_t testuxei16_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *
   return vluxei16_v_i8m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -2238,7 +2035,6 @@ vint8m2_t testuxei16_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *
   return vluxei16_v_i8m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
@@ -2249,7 +2045,6 @@ vint8m4_t testuxei16_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *
   return vluxei16_v_i8m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -2260,7 +2055,6 @@ vint8mf8_t testuxei32_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8
   return vluxei32_v_i8mf8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -2271,7 +2065,6 @@ vint8mf4_t testuxei32_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8
   return vluxei32_v_i8mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -2282,7 +2075,6 @@ vint8mf2_t testuxei32_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8
   return vluxei32_v_i8mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -2293,7 +2085,6 @@ vint8m1_t testuxei32_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *
   return vluxei32_v_i8m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -2304,7 +2095,6 @@ vint8m2_t testuxei32_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *
   return vluxei32_v_i8m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -2315,7 +2105,6 @@ vint8mf8_t testuxei64_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8
   return vluxei64_v_i8mf8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -2326,7 +2115,6 @@ vint8mf4_t testuxei64_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8
   return vluxei64_v_i8mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -2337,7 +2125,6 @@ vint8mf2_t testuxei64_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8
   return vluxei64_v_i8mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -2348,7 +2135,6 @@ vint8m1_t testuxei64_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *
   return vluxei64_v_i8m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -2359,7 +2145,6 @@ vint16mf4_t testuxei8_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const in
   return vluxei8_v_i16mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -2370,7 +2155,6 @@ vint16mf2_t testuxei8_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const in
   return vluxei8_v_i16mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -2381,7 +2165,6 @@ vint16m1_t testuxei8_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16
   return vluxei8_v_i16m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -2392,7 +2175,6 @@ vint16m2_t testuxei8_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_
   return vluxei8_v_i16m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -2403,7 +2185,6 @@ vint16m4_t testuxei8_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_
   return vluxei8_v_i16m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
@@ -2414,7 +2195,6 @@ vint16m8_t testuxei8_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_
   return vluxei8_v_i16m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -2425,7 +2205,6 @@ vint16mf4_t testuxei16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const i
   return vluxei16_v_i16mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -2436,7 +2215,6 @@ vint16mf2_t testuxei16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const i
   return vluxei16_v_i16mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -2447,7 +2225,6 @@ vint16m1_t testuxei16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int1
   return vluxei16_v_i16m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -2458,7 +2235,6 @@ vint16m2_t testuxei16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16
   return vluxei16_v_i16m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -2469,7 +2245,6 @@ vint16m4_t testuxei16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16
   return vluxei16_v_i16m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
@@ -2480,7 +2255,6 @@ vint16m8_t testuxei16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16
   return vluxei16_v_i16m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -2491,7 +2265,6 @@ vint16mf4_t testuxei32_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const i
   return vluxei32_v_i16mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -2502,7 +2275,6 @@ vint16mf2_t testuxei32_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const i
   return vluxei32_v_i16mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -2513,7 +2285,6 @@ vint16m1_t testuxei32_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int1
   return vluxei32_v_i16m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -2524,7 +2295,6 @@ vint16m2_t testuxei32_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16
   return vluxei32_v_i16m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -2535,7 +2305,6 @@ vint16m4_t testuxei32_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16
   return vluxei32_v_i16m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -2546,7 +2315,6 @@ vint16mf4_t testuxei64_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const i
   return vluxei64_v_i16mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -2557,7 +2325,6 @@ vint16mf2_t testuxei64_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const i
   return vluxei64_v_i16mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -2568,7 +2335,6 @@ vint16m1_t testuxei64_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int1
   return vluxei64_v_i16m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -2579,7 +2345,6 @@ vint16m2_t testuxei64_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16
   return vluxei64_v_i16m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -2590,7 +2355,6 @@ vint32mf2_t testuxei8_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const in
   return vluxei8_v_i32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -2601,7 +2365,6 @@ vint32m1_t testuxei8_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32
   return vluxei8_v_i32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -2612,7 +2375,6 @@ vint32m2_t testuxei8_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32
   return vluxei8_v_i32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -2623,7 +2385,6 @@ vint32m4_t testuxei8_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_
   return vluxei8_v_i32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -2634,7 +2395,6 @@ vint32m8_t testuxei8_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_
   return vluxei8_v_i32m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -2645,7 +2405,6 @@ vint32mf2_t testuxei16_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const i
   return vluxei16_v_i32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -2656,7 +2415,6 @@ vint32m1_t testuxei16_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int3
   return vluxei16_v_i32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -2667,7 +2425,6 @@ vint32m2_t testuxei16_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int3
   return vluxei16_v_i32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -2678,7 +2435,6 @@ vint32m4_t testuxei16_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32
   return vluxei16_v_i32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -2689,7 +2445,6 @@ vint32m8_t testuxei16_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32
   return vluxei16_v_i32m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -2700,7 +2455,6 @@ vint32mf2_t testuxei32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const i
   return vluxei32_v_i32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -2711,7 +2465,6 @@ vint32m1_t testuxei32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int3
   return vluxei32_v_i32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -2722,7 +2475,6 @@ vint32m2_t testuxei32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int3
   return vluxei32_v_i32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -2733,7 +2485,6 @@ vint32m4_t testuxei32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32
   return vluxei32_v_i32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -2744,7 +2495,6 @@ vint32m8_t testuxei32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32
   return vluxei32_v_i32m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -2755,7 +2505,6 @@ vint32mf2_t testuxei64_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const i
   return vluxei64_v_i32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -2766,7 +2515,6 @@ vint32m1_t testuxei64_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int3
   return vluxei64_v_i32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -2777,7 +2525,6 @@ vint32m2_t testuxei64_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int3
   return vluxei64_v_i32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -2788,7 +2535,6 @@ vint32m4_t testuxei64_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32
   return vluxei64_v_i32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -2799,7 +2545,6 @@ vint64m1_t testuxei8_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64
   return vluxei8_v_i64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -2810,7 +2555,6 @@ vint64m2_t testuxei8_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64
   return vluxei8_v_i64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -2821,7 +2565,6 @@ vint64m4_t testuxei8_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64
   return vluxei8_v_i64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -2832,7 +2575,6 @@ vint64m8_t testuxei8_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_
   return vluxei8_v_i64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -2843,7 +2585,6 @@ vint64m1_t testuxei16_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int6
   return vluxei16_v_i64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -2854,7 +2595,6 @@ vint64m2_t testuxei16_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int6
   return vluxei16_v_i64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -2865,7 +2605,6 @@ vint64m4_t testuxei16_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int6
   return vluxei16_v_i64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -2876,7 +2615,6 @@ vint64m8_t testuxei16_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64
   return vluxei16_v_i64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -2887,7 +2625,6 @@ vint64m1_t testuxei32_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int6
   return vluxei32_v_i64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -2898,7 +2635,6 @@ vint64m2_t testuxei32_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int6
   return vluxei32_v_i64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -2909,7 +2645,6 @@ vint64m4_t testuxei32_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int6
   return vluxei32_v_i64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -2920,7 +2655,6 @@ vint64m8_t testuxei32_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64
   return vluxei32_v_i64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -2931,7 +2665,6 @@ vint64m1_t testuxei64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int6
   return vluxei64_v_i64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -2942,7 +2675,6 @@ vint64m2_t testuxei64_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int6
   return vluxei64_v_i64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -2953,7 +2685,6 @@ vint64m4_t testuxei64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int6
   return vluxei64_v_i64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -2964,7 +2695,6 @@ vint64m8_t testuxei64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64
   return vluxei64_v_i64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -2975,7 +2705,6 @@ vuint8mf8_t testuxei8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uin
   return vluxei8_v_u8mf8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -2986,7 +2715,6 @@ vuint8mf4_t testuxei8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uin
   return vluxei8_v_u8mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -2997,7 +2725,6 @@ vuint8mf2_t testuxei8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uin
   return vluxei8_v_u8mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -3008,7 +2735,6 @@ vuint8m1_t testuxei8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t
   return vluxei8_v_u8m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -3019,7 +2745,6 @@ vuint8m2_t testuxei8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t
   return vluxei8_v_u8m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
@@ -3030,7 +2755,6 @@ vuint8m4_t testuxei8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t
   return vluxei8_v_u8m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
@@ -3041,7 +2765,6 @@ vuint8m8_t testuxei8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t
   return vluxei8_v_u8m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -3052,7 +2775,6 @@ vuint8mf8_t testuxei16_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const ui
   return vluxei16_v_u8mf8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -3063,7 +2785,6 @@ vuint8mf4_t testuxei16_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const ui
   return vluxei16_v_u8mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -3074,7 +2795,6 @@ vuint8mf2_t testuxei16_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const ui
   return vluxei16_v_u8mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -3085,7 +2805,6 @@ vuint8m1_t testuxei16_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_
   return vluxei16_v_u8m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -3096,7 +2815,6 @@ vuint8m2_t testuxei16_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_
   return vluxei16_v_u8m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
@@ -3107,7 +2825,6 @@ vuint8m4_t testuxei16_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_
   return vluxei16_v_u8m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -3118,7 +2835,6 @@ vuint8mf8_t testuxei32_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const ui
   return vluxei32_v_u8mf8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -3129,7 +2845,6 @@ vuint8mf4_t testuxei32_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const ui
   return vluxei32_v_u8mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -3140,7 +2855,6 @@ vuint8mf2_t testuxei32_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const ui
   return vluxei32_v_u8mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -3151,7 +2865,6 @@ vuint8m1_t testuxei32_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_
   return vluxei32_v_u8m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -3162,7 +2875,6 @@ vuint8m2_t testuxei32_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_
   return vluxei32_v_u8m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -3173,7 +2885,6 @@ vuint8mf8_t testuxei64_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const ui
   return vluxei64_v_u8mf8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -3184,7 +2895,6 @@ vuint8mf4_t testuxei64_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const ui
   return vluxei64_v_u8mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -3195,7 +2905,6 @@ vuint8mf2_t testuxei64_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const ui
   return vluxei64_v_u8mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -3206,7 +2915,6 @@ vuint8m1_t testuxei64_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_
   return vluxei64_v_u8m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -3217,7 +2925,6 @@ vuint16mf4_t testuxei8_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const
   return vluxei8_v_u16mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -3228,7 +2935,6 @@ vuint16mf2_t testuxei8_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const
   return vluxei8_v_u16mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -3239,7 +2945,6 @@ vuint16m1_t testuxei8_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uin
   return vluxei8_v_u16m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -3250,7 +2955,6 @@ vuint16m2_t testuxei8_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint
   return vluxei8_v_u16m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -3261,7 +2965,6 @@ vuint16m4_t testuxei8_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint
   return vluxei8_v_u16m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
@@ -3272,7 +2975,6 @@ vuint16m8_t testuxei8_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint
   return vluxei8_v_u16m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -3283,7 +2985,6 @@ vuint16mf4_t testuxei16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const
   return vluxei16_v_u16mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -3294,7 +2995,6 @@ vuint16mf2_t testuxei16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const
   return vluxei16_v_u16mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -3305,7 +3005,6 @@ vuint16m1_t testuxei16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const ui
   return vluxei16_v_u16m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -3316,7 +3015,6 @@ vuint16m2_t testuxei16_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uin
   return vluxei16_v_u16m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -3327,7 +3025,6 @@ vuint16m4_t testuxei16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uin
   return vluxei16_v_u16m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
@@ -3338,7 +3035,6 @@ vuint16m8_t testuxei16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uin
   return vluxei16_v_u16m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -3349,7 +3045,6 @@ vuint16mf4_t testuxei32_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const
   return vluxei32_v_u16mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -3360,7 +3055,6 @@ vuint16mf2_t testuxei32_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const
   return vluxei32_v_u16mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -3371,7 +3065,6 @@ vuint16m1_t testuxei32_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const ui
   return vluxei32_v_u16m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -3382,7 +3075,6 @@ vuint16m2_t testuxei32_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uin
   return vluxei32_v_u16m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -3393,7 +3085,6 @@ vuint16m4_t testuxei32_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uin
   return vluxei32_v_u16m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -3404,7 +3095,6 @@ vuint16mf4_t testuxei64_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const
   return vluxei64_v_u16mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -3415,7 +3105,6 @@ vuint16mf2_t testuxei64_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const
   return vluxei64_v_u16mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -3426,7 +3115,6 @@ vuint16m1_t testuxei64_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const ui
   return vluxei64_v_u16m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -3437,7 +3125,6 @@ vuint16m2_t testuxei64_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uin
   return vluxei64_v_u16m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -3448,7 +3135,6 @@ vuint32mf2_t testuxei8_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const
   return vluxei8_v_u32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -3459,7 +3145,6 @@ vuint32m1_t testuxei8_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uin
   return vluxei8_v_u32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -3470,7 +3155,6 @@ vuint32m2_t testuxei8_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uin
   return vluxei8_v_u32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -3481,7 +3165,6 @@ vuint32m4_t testuxei8_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint
   return vluxei8_v_u32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -3492,7 +3175,6 @@ vuint32m8_t testuxei8_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint
   return vluxei8_v_u32m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -3503,7 +3185,6 @@ vuint32mf2_t testuxei16_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const
   return vluxei16_v_u32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -3514,7 +3195,6 @@ vuint32m1_t testuxei16_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const ui
   return vluxei16_v_u32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -3525,7 +3205,6 @@ vuint32m2_t testuxei16_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const ui
   return vluxei16_v_u32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -3536,7 +3215,6 @@ vuint32m4_t testuxei16_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uin
   return vluxei16_v_u32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -3547,7 +3225,6 @@ vuint32m8_t testuxei16_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uin
   return vluxei16_v_u32m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -3558,7 +3235,6 @@ vuint32mf2_t testuxei32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const
   return vluxei32_v_u32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -3569,7 +3245,6 @@ vuint32m1_t testuxei32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const ui
   return vluxei32_v_u32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -3580,7 +3255,6 @@ vuint32m2_t testuxei32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const ui
   return vluxei32_v_u32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -3591,7 +3265,6 @@ vuint32m4_t testuxei32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uin
   return vluxei32_v_u32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -3602,7 +3275,6 @@ vuint32m8_t testuxei32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uin
   return vluxei32_v_u32m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -3613,7 +3285,6 @@ vuint32mf2_t testuxei64_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const
   return vluxei64_v_u32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -3624,7 +3295,6 @@ vuint32m1_t testuxei64_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const ui
   return vluxei64_v_u32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -3635,7 +3305,6 @@ vuint32m2_t testuxei64_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const ui
   return vluxei64_v_u32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -3646,7 +3315,6 @@ vuint32m4_t testuxei64_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uin
   return vluxei64_v_u32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -3657,7 +3325,6 @@ vuint64m1_t testuxei8_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uin
   return vluxei8_v_u64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -3668,7 +3335,6 @@ vuint64m2_t testuxei8_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uin
   return vluxei8_v_u64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -3679,7 +3345,6 @@ vuint64m4_t testuxei8_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uin
   return vluxei8_v_u64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -3690,7 +3355,6 @@ vuint64m8_t testuxei8_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint
   return vluxei8_v_u64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -3701,7 +3365,6 @@ vuint64m1_t testuxei16_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const ui
   return vluxei16_v_u64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -3712,7 +3375,6 @@ vuint64m2_t testuxei16_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const ui
   return vluxei16_v_u64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -3723,7 +3385,6 @@ vuint64m4_t testuxei16_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const ui
   return vluxei16_v_u64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -3734,7 +3395,6 @@ vuint64m8_t testuxei16_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uin
   return vluxei16_v_u64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -3745,7 +3405,6 @@ vuint64m1_t testuxei32_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const ui
   return vluxei32_v_u64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -3756,7 +3415,6 @@ vuint64m2_t testuxei32_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const ui
   return vluxei32_v_u64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -3767,7 +3425,6 @@ vuint64m4_t testuxei32_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const ui
   return vluxei32_v_u64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -3778,7 +3435,6 @@ vuint64m8_t testuxei32_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uin
   return vluxei32_v_u64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -3789,7 +3445,6 @@ vuint64m1_t testuxei64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const ui
   return vluxei64_v_u64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -3800,7 +3455,6 @@ vuint64m2_t testuxei64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const ui
   return vluxei64_v_u64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -3811,7 +3465,6 @@ vuint64m4_t testuxei64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const ui
   return vluxei64_v_u64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -3822,7 +3475,6 @@ vuint64m8_t testuxei64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uin
   return vluxei64_v_u64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -3833,7 +3485,6 @@ vfloat32mf2_t testuxei8_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, cons
   return vluxei8_v_f32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -3844,7 +3495,6 @@ vfloat32m1_t testuxei8_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const f
   return vluxei8_v_f32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -3855,7 +3505,6 @@ vfloat32m2_t testuxei8_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const f
   return vluxei8_v_f32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -3866,7 +3515,6 @@ vfloat32m4_t testuxei8_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const fl
   return vluxei8_v_f32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
@@ -3877,7 +3525,6 @@ vfloat32m8_t testuxei8_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const fl
   return vluxei8_v_f32m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -3888,7 +3535,6 @@ vfloat32mf2_t testuxei16_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, con
   return vluxei16_v_f32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -3899,7 +3545,6 @@ vfloat32m1_t testuxei16_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const
   return vluxei16_v_f32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -3910,7 +3555,6 @@ vfloat32m2_t testuxei16_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const
   return vluxei16_v_f32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -3921,7 +3565,6 @@ vfloat32m4_t testuxei16_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const f
   return vluxei16_v_f32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
@@ -3932,7 +3575,6 @@ vfloat32m8_t testuxei16_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const f
   return vluxei16_v_f32m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -3943,7 +3585,6 @@ vfloat32mf2_t testuxei32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, con
   return vluxei32_v_f32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -3954,7 +3595,6 @@ vfloat32m1_t testuxei32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const
   return vluxei32_v_f32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -3965,7 +3605,6 @@ vfloat32m2_t testuxei32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const
   return vluxei32_v_f32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -3976,7 +3615,6 @@ vfloat32m4_t testuxei32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const f
   return vluxei32_v_f32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
@@ -3987,7 +3625,6 @@ vfloat32m8_t testuxei32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const f
   return vluxei32_v_f32m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -3998,7 +3635,6 @@ vfloat32mf2_t testuxei64_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, con
   return vluxei64_v_f32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -4009,7 +3645,6 @@ vfloat32m1_t testuxei64_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const
   return vluxei64_v_f32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -4020,7 +3655,6 @@ vfloat32m2_t testuxei64_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const
   return vluxei64_v_f32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -4031,7 +3665,6 @@ vfloat32m4_t testuxei64_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const f
   return vluxei64_v_f32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -4042,7 +3675,6 @@ vfloat64m1_t testuxei8_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const d
   return vluxei8_v_f64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -4053,7 +3685,6 @@ vfloat64m2_t testuxei8_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const d
   return vluxei8_v_f64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -4064,7 +3695,6 @@ vfloat64m4_t testuxei8_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const d
   return vluxei8_v_f64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei8_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
@@ -4075,7 +3705,6 @@ vfloat64m8_t testuxei8_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const do
   return vluxei8_v_f64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -4086,7 +3715,6 @@ vfloat64m1_t testuxei16_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const
   return vluxei16_v_f64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -4097,7 +3725,6 @@ vfloat64m2_t testuxei16_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const
   return vluxei16_v_f64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -4108,7 +3735,6 @@ vfloat64m4_t testuxei16_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const
   return vluxei16_v_f64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei16_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
@@ -4119,7 +3745,6 @@ vfloat64m8_t testuxei16_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const d
   return vluxei16_v_f64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -4130,7 +3755,6 @@ vfloat64m1_t testuxei32_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const
   return vluxei32_v_f64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -4141,7 +3765,6 @@ vfloat64m2_t testuxei32_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const
   return vluxei32_v_f64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -4152,7 +3775,6 @@ vfloat64m4_t testuxei32_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const
   return vluxei32_v_f64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei32_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
@@ -4163,7 +3785,6 @@ vfloat64m8_t testuxei32_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const d
   return vluxei32_v_f64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -4174,7 +3795,6 @@ vfloat64m1_t testuxei64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const
   return vluxei64_v_f64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -4185,7 +3805,6 @@ vfloat64m2_t testuxei64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const
   return vluxei64_v_f64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -4196,7 +3815,6 @@ vfloat64m4_t testuxei64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const
   return vluxei64_v_f64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @testuxei64_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmacc.c
index 65dd2bf241b8c..2b729b22ceb22 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmacc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmacc.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint8mf8_t test_vmacc_vv_i8mf8(vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, s
   return vmacc_vv_i8mf8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint8mf8_t test_vmacc_vx_i8mf8(vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_
   return vmacc_vx_i8mf8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint8mf4_t test_vmacc_vv_i8mf4(vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, s
   return vmacc_vv_i8mf4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint8mf4_t test_vmacc_vx_i8mf4(vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_
   return vmacc_vx_i8mf4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint8mf2_t test_vmacc_vv_i8mf2(vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, s
   return vmacc_vv_i8mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint8mf2_t test_vmacc_vx_i8mf2(vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_
   return vmacc_vx_i8mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vint8m1_t test_vmacc_vv_i8m1(vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t
   return vmacc_vv_i8m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vint8m1_t test_vmacc_vx_i8m1(vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl
   return vmacc_vx_i8m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vint8m2_t test_vmacc_vv_i8m2(vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t
   return vmacc_vv_i8m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vint8m2_t test_vmacc_vx_i8m2(vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl
   return vmacc_vx_i8m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vint8m4_t test_vmacc_vv_i8m4(vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t
   return vmacc_vv_i8m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vint8m4_t test_vmacc_vx_i8m4(vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl
   return vmacc_vx_i8m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vint8m8_t test_vmacc_vv_i8m8(vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t
   return vmacc_vv_i8m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vint8m8_t test_vmacc_vx_i8m8(vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl
   return vmacc_vx_i8m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vint16mf4_t test_vmacc_vv_i16mf4(vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t o
   return vmacc_vv_i16mf4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vint16mf4_t test_vmacc_vx_i16mf4(vint16mf4_t acc, int16_t op1, vint16mf4_t op2,
   return vmacc_vx_i16mf4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vint16mf2_t test_vmacc_vv_i16mf2(vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t o
   return vmacc_vv_i16mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vint16mf2_t test_vmacc_vx_i16mf2(vint16mf2_t acc, int16_t op1, vint16mf2_t op2,
   return vmacc_vx_i16mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vint16m1_t test_vmacc_vv_i16m1(vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, s
   return vmacc_vv_i16m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vint16m1_t test_vmacc_vx_i16m1(vint16m1_t acc, int16_t op1, vint16m1_t op2, size
   return vmacc_vx_i16m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vint16m2_t test_vmacc_vv_i16m2(vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, s
   return vmacc_vv_i16m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vint16m2_t test_vmacc_vx_i16m2(vint16m2_t acc, int16_t op1, vint16m2_t op2, size
   return vmacc_vx_i16m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vint16m4_t test_vmacc_vv_i16m4(vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, s
   return vmacc_vv_i16m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vint16m4_t test_vmacc_vx_i16m4(vint16m4_t acc, int16_t op1, vint16m4_t op2, size
   return vmacc_vx_i16m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vint16m8_t test_vmacc_vv_i16m8(vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, s
   return vmacc_vv_i16m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vint16m8_t test_vmacc_vx_i16m8(vint16m8_t acc, int16_t op1, vint16m8_t op2, size
   return vmacc_vx_i16m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vint32mf2_t test_vmacc_vv_i32mf2(vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t o
   return vmacc_vv_i32mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vint32mf2_t test_vmacc_vx_i32mf2(vint32mf2_t acc, int32_t op1, vint32mf2_t op2,
   return vmacc_vx_i32mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vint32m1_t test_vmacc_vv_i32m1(vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, s
   return vmacc_vv_i32m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -304,7 +274,6 @@ vint32m1_t test_vmacc_vx_i32m1(vint32m1_t acc, int32_t op1, vint32m1_t op2, size
   return vmacc_vx_i32m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -314,7 +283,6 @@ vint32m2_t test_vmacc_vv_i32m2(vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, s
   return vmacc_vv_i32m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -324,7 +292,6 @@ vint32m2_t test_vmacc_vx_i32m2(vint32m2_t acc, int32_t op1, vint32m2_t op2, size
   return vmacc_vx_i32m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -334,7 +301,6 @@ vint32m4_t test_vmacc_vv_i32m4(vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, s
   return vmacc_vv_i32m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -344,7 +310,6 @@ vint32m4_t test_vmacc_vx_i32m4(vint32m4_t acc, int32_t op1, vint32m4_t op2, size
   return vmacc_vx_i32m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -354,7 +319,6 @@ vint32m8_t test_vmacc_vv_i32m8(vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, s
   return vmacc_vv_i32m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -364,7 +328,6 @@ vint32m8_t test_vmacc_vx_i32m8(vint32m8_t acc, int32_t op1, vint32m8_t op2, size
   return vmacc_vx_i32m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -374,7 +337,6 @@ vint64m1_t test_vmacc_vv_i64m1(vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, s
   return vmacc_vv_i64m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -384,7 +346,6 @@ vint64m1_t test_vmacc_vx_i64m1(vint64m1_t acc, int64_t op1, vint64m1_t op2, size
   return vmacc_vx_i64m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -394,7 +355,6 @@ vint64m2_t test_vmacc_vv_i64m2(vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, s
   return vmacc_vv_i64m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -404,7 +364,6 @@ vint64m2_t test_vmacc_vx_i64m2(vint64m2_t acc, int64_t op1, vint64m2_t op2, size
   return vmacc_vx_i64m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -414,7 +373,6 @@ vint64m4_t test_vmacc_vv_i64m4(vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, s
   return vmacc_vv_i64m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -424,7 +382,6 @@ vint64m4_t test_vmacc_vx_i64m4(vint64m4_t acc, int64_t op1, vint64m4_t op2, size
   return vmacc_vx_i64m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -434,7 +391,6 @@ vint64m8_t test_vmacc_vv_i64m8(vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, s
   return vmacc_vv_i64m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -444,7 +400,6 @@ vint64m8_t test_vmacc_vx_i64m8(vint64m8_t acc, int64_t op1, vint64m8_t op2, size
   return vmacc_vx_i64m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -454,7 +409,6 @@ vuint8mf8_t test_vmacc_vv_u8mf8(vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op
   return vmacc_vv_u8mf8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -464,7 +418,6 @@ vuint8mf8_t test_vmacc_vx_u8mf8(vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, s
   return vmacc_vx_u8mf8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -474,7 +427,6 @@ vuint8mf4_t test_vmacc_vv_u8mf4(vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op
   return vmacc_vv_u8mf4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -484,7 +436,6 @@ vuint8mf4_t test_vmacc_vx_u8mf4(vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, s
   return vmacc_vx_u8mf4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -494,7 +445,6 @@ vuint8mf2_t test_vmacc_vv_u8mf2(vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op
   return vmacc_vv_u8mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -504,7 +454,6 @@ vuint8mf2_t test_vmacc_vx_u8mf2(vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, s
   return vmacc_vx_u8mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -514,7 +463,6 @@ vuint8m1_t test_vmacc_vv_u8m1(vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, si
   return vmacc_vv_u8m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -524,7 +472,6 @@ vuint8m1_t test_vmacc_vx_u8m1(vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_
   return vmacc_vx_u8m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -534,7 +481,6 @@ vuint8m2_t test_vmacc_vv_u8m2(vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, si
   return vmacc_vv_u8m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -544,7 +490,6 @@ vuint8m2_t test_vmacc_vx_u8m2(vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_
   return vmacc_vx_u8m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -554,7 +499,6 @@ vuint8m4_t test_vmacc_vv_u8m4(vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, si
   return vmacc_vv_u8m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -564,7 +508,6 @@ vuint8m4_t test_vmacc_vx_u8m4(vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_
   return vmacc_vx_u8m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -574,7 +517,6 @@ vuint8m8_t test_vmacc_vv_u8m8(vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, si
   return vmacc_vv_u8m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -584,7 +526,6 @@ vuint8m8_t test_vmacc_vx_u8m8(vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_
   return vmacc_vx_u8m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -594,7 +535,6 @@ vuint16mf4_t test_vmacc_vv_u16mf4(vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4
   return vmacc_vv_u16mf4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -604,7 +544,6 @@ vuint16mf4_t test_vmacc_vx_u16mf4(vuint16mf4_t acc, uint16_t op1, vuint16mf4_t o
   return vmacc_vx_u16mf4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -614,7 +553,6 @@ vuint16mf2_t test_vmacc_vv_u16mf2(vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2
   return vmacc_vv_u16mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -624,7 +562,6 @@ vuint16mf2_t test_vmacc_vx_u16mf2(vuint16mf2_t acc, uint16_t op1, vuint16mf2_t o
   return vmacc_vx_u16mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -634,7 +571,6 @@ vuint16m1_t test_vmacc_vv_u16m1(vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op
   return vmacc_vv_u16m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -644,7 +580,6 @@ vuint16m1_t test_vmacc_vx_u16m1(vuint16m1_t acc, uint16_t op1, vuint16m1_t op2,
   return vmacc_vx_u16m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -654,7 +589,6 @@ vuint16m2_t test_vmacc_vv_u16m2(vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op
   return vmacc_vv_u16m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -664,7 +598,6 @@ vuint16m2_t test_vmacc_vx_u16m2(vuint16m2_t acc, uint16_t op1, vuint16m2_t op2,
   return vmacc_vx_u16m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -674,7 +607,6 @@ vuint16m4_t test_vmacc_vv_u16m4(vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op
   return vmacc_vv_u16m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -684,7 +616,6 @@ vuint16m4_t test_vmacc_vx_u16m4(vuint16m4_t acc, uint16_t op1, vuint16m4_t op2,
   return vmacc_vx_u16m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -694,7 +625,6 @@ vuint16m8_t test_vmacc_vv_u16m8(vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op
   return vmacc_vv_u16m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -704,7 +634,6 @@ vuint16m8_t test_vmacc_vx_u16m8(vuint16m8_t acc, uint16_t op1, vuint16m8_t op2,
   return vmacc_vx_u16m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -714,7 +643,6 @@ vuint32mf2_t test_vmacc_vv_u32mf2(vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2
   return vmacc_vv_u32mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -724,7 +652,6 @@ vuint32mf2_t test_vmacc_vx_u32mf2(vuint32mf2_t acc, uint32_t op1, vuint32mf2_t o
   return vmacc_vx_u32mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -734,7 +661,6 @@ vuint32m1_t test_vmacc_vv_u32m1(vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op
   return vmacc_vv_u32m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -744,7 +670,6 @@ vuint32m1_t test_vmacc_vx_u32m1(vuint32m1_t acc, uint32_t op1, vuint32m1_t op2,
   return vmacc_vx_u32m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -754,7 +679,6 @@ vuint32m2_t test_vmacc_vv_u32m2(vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op
   return vmacc_vv_u32m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -764,7 +688,6 @@ vuint32m2_t test_vmacc_vx_u32m2(vuint32m2_t acc, uint32_t op1, vuint32m2_t op2,
   return vmacc_vx_u32m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -774,7 +697,6 @@ vuint32m4_t test_vmacc_vv_u32m4(vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op
   return vmacc_vv_u32m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -784,7 +706,6 @@ vuint32m4_t test_vmacc_vx_u32m4(vuint32m4_t acc, uint32_t op1, vuint32m4_t op2,
   return vmacc_vx_u32m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -794,7 +715,6 @@ vuint32m8_t test_vmacc_vv_u32m8(vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op
   return vmacc_vv_u32m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -804,7 +724,6 @@ vuint32m8_t test_vmacc_vx_u32m8(vuint32m8_t acc, uint32_t op1, vuint32m8_t op2,
   return vmacc_vx_u32m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -814,7 +733,6 @@ vuint64m1_t test_vmacc_vv_u64m1(vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op
   return vmacc_vv_u64m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -824,7 +742,6 @@ vuint64m1_t test_vmacc_vx_u64m1(vuint64m1_t acc, uint64_t op1, vuint64m1_t op2,
   return vmacc_vx_u64m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -834,7 +751,6 @@ vuint64m2_t test_vmacc_vv_u64m2(vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op
   return vmacc_vv_u64m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -844,7 +760,6 @@ vuint64m2_t test_vmacc_vx_u64m2(vuint64m2_t acc, uint64_t op1, vuint64m2_t op2,
   return vmacc_vx_u64m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -854,7 +769,6 @@ vuint64m4_t test_vmacc_vv_u64m4(vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op
   return vmacc_vv_u64m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -864,7 +778,6 @@ vuint64m4_t test_vmacc_vx_u64m4(vuint64m4_t acc, uint64_t op1, vuint64m4_t op2,
   return vmacc_vx_u64m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -874,7 +787,6 @@ vuint64m8_t test_vmacc_vv_u64m8(vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op
   return vmacc_vv_u64m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -884,7 +796,6 @@ vuint64m8_t test_vmacc_vx_u64m8(vuint64m8_t acc, uint64_t op1, vuint64m8_t op2,
   return vmacc_vx_u64m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -894,7 +805,6 @@ vint8mf8_t test_vmacc_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1,
   return vmacc_vv_i8mf8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -904,7 +814,6 @@ vint8mf8_t test_vmacc_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vin
   return vmacc_vx_i8mf8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -914,7 +823,6 @@ vint8mf4_t test_vmacc_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1,
   return vmacc_vv_i8mf4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -924,7 +832,6 @@ vint8mf4_t test_vmacc_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vin
   return vmacc_vx_i8mf4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -934,7 +841,6 @@ vint8mf2_t test_vmacc_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1,
   return vmacc_vv_i8mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -944,7 +850,6 @@ vint8mf2_t test_vmacc_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vin
   return vmacc_vx_i8mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -954,7 +859,6 @@ vint8m1_t test_vmacc_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vint
   return vmacc_vv_i8m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -964,7 +868,6 @@ vint8m1_t test_vmacc_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m1
   return vmacc_vx_i8m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -974,7 +877,6 @@ vint8m2_t test_vmacc_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vint
   return vmacc_vv_i8m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -984,7 +886,6 @@ vint8m2_t test_vmacc_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m2
   return vmacc_vx_i8m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -994,7 +895,6 @@ vint8m4_t test_vmacc_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vint
   return vmacc_vv_i8m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1004,7 +904,6 @@ vint8m4_t test_vmacc_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m4
   return vmacc_vx_i8m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1014,7 +913,6 @@ vint8m8_t test_vmacc_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vint
   return vmacc_vv_i8m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1024,7 +922,6 @@ vint8m8_t test_vmacc_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m8
   return vmacc_vx_i8m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1034,7 +931,6 @@ vint16mf4_t test_vmacc_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t
   return vmacc_vv_i16mf4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1044,7 +940,6 @@ vint16mf4_t test_vmacc_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1,
   return vmacc_vx_i16mf4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1054,7 +949,6 @@ vint16mf2_t test_vmacc_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t
   return vmacc_vv_i16mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1064,7 +958,6 @@ vint16mf2_t test_vmacc_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1,
   return vmacc_vx_i16mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1074,7 +967,6 @@ vint16m1_t test_vmacc_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1,
   return vmacc_vv_i16m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1084,7 +976,6 @@ vint16m1_t test_vmacc_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, vi
   return vmacc_vx_i16m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1094,7 +985,6 @@ vint16m2_t test_vmacc_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1,
   return vmacc_vv_i16m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1104,7 +994,6 @@ vint16m2_t test_vmacc_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vin
   return vmacc_vx_i16m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1114,7 +1003,6 @@ vint16m4_t test_vmacc_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1,
   return vmacc_vv_i16m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1124,7 +1012,6 @@ vint16m4_t test_vmacc_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vin
   return vmacc_vx_i16m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1134,7 +1021,6 @@ vint16m8_t test_vmacc_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1,
   return vmacc_vv_i16m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1144,7 +1030,6 @@ vint16m8_t test_vmacc_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vin
   return vmacc_vx_i16m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1154,7 +1039,6 @@ vint32mf2_t test_vmacc_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t
   return vmacc_vv_i32mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1164,7 +1048,6 @@ vint32mf2_t test_vmacc_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1,
   return vmacc_vx_i32mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1174,7 +1057,6 @@ vint32m1_t test_vmacc_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1,
   return vmacc_vv_i32m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1184,7 +1066,6 @@ vint32m1_t test_vmacc_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, vi
   return vmacc_vx_i32m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1194,7 +1075,6 @@ vint32m2_t test_vmacc_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1,
   return vmacc_vv_i32m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1204,7 +1084,6 @@ vint32m2_t test_vmacc_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, vi
   return vmacc_vx_i32m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1214,7 +1093,6 @@ vint32m4_t test_vmacc_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1,
   return vmacc_vv_i32m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1224,7 +1102,6 @@ vint32m4_t test_vmacc_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vin
   return vmacc_vx_i32m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1234,7 +1111,6 @@ vint32m8_t test_vmacc_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1,
   return vmacc_vv_i32m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1244,7 +1120,6 @@ vint32m8_t test_vmacc_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vin
   return vmacc_vx_i32m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1254,7 +1129,6 @@ vint64m1_t test_vmacc_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1,
   return vmacc_vv_i64m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1264,7 +1138,6 @@ vint64m1_t test_vmacc_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, vi
   return vmacc_vx_i64m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1274,7 +1147,6 @@ vint64m2_t test_vmacc_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1,
   return vmacc_vv_i64m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1284,7 +1156,6 @@ vint64m2_t test_vmacc_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, vi
   return vmacc_vx_i64m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1294,7 +1165,6 @@ vint64m4_t test_vmacc_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1,
   return vmacc_vv_i64m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1304,7 +1174,6 @@ vint64m4_t test_vmacc_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, vi
   return vmacc_vx_i64m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1314,7 +1183,6 @@ vint64m8_t test_vmacc_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1,
   return vmacc_vv_i64m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1324,7 +1192,6 @@ vint64m8_t test_vmacc_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vin
   return vmacc_vx_i64m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1334,7 +1201,6 @@ vuint8mf8_t test_vmacc_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t o
   return vmacc_vv_u8mf8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1344,7 +1210,6 @@ vuint8mf8_t test_vmacc_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1,
   return vmacc_vx_u8mf8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1354,7 +1219,6 @@ vuint8mf4_t test_vmacc_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t o
   return vmacc_vv_u8mf4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1364,7 +1228,6 @@ vuint8mf4_t test_vmacc_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1,
   return vmacc_vx_u8mf4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1374,7 +1237,6 @@ vuint8mf2_t test_vmacc_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t o
   return vmacc_vv_u8mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1384,7 +1246,6 @@ vuint8mf2_t test_vmacc_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1,
   return vmacc_vx_u8mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1394,7 +1255,6 @@ vuint8m1_t test_vmacc_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1, v
   return vmacc_vv_u8m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1404,7 +1264,6 @@ vuint8m1_t test_vmacc_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vuin
   return vmacc_vx_u8m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1414,7 +1273,6 @@ vuint8m2_t test_vmacc_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1, v
   return vmacc_vv_u8m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1424,7 +1282,6 @@ vuint8m2_t test_vmacc_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vuin
   return vmacc_vx_u8m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1434,7 +1291,6 @@ vuint8m4_t test_vmacc_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1, v
   return vmacc_vv_u8m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1444,7 +1300,6 @@ vuint8m4_t test_vmacc_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vuin
   return vmacc_vx_u8m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1454,7 +1309,6 @@ vuint8m8_t test_vmacc_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1, v
   return vmacc_vv_u8m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1464,7 +1318,6 @@ vuint8m8_t test_vmacc_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vuin
   return vmacc_vx_u8m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1474,7 +1327,6 @@ vuint16mf4_t test_vmacc_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf4
   return vmacc_vv_u16mf4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1484,7 +1336,6 @@ vuint16mf4_t test_vmacc_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t o
   return vmacc_vx_u16mf4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1494,7 +1345,6 @@ vuint16mf2_t test_vmacc_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf2
   return vmacc_vv_u16mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1504,7 +1354,6 @@ vuint16mf2_t test_vmacc_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t o
   return vmacc_vx_u16mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1514,7 +1363,6 @@ vuint16m1_t test_vmacc_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t o
   return vmacc_vv_u16m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1524,7 +1372,6 @@ vuint16m1_t test_vmacc_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1,
   return vmacc_vx_u16m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1534,7 +1381,6 @@ vuint16m2_t test_vmacc_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t op
   return vmacc_vv_u16m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1544,7 +1390,6 @@ vuint16m2_t test_vmacc_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1,
   return vmacc_vx_u16m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1554,7 +1399,6 @@ vuint16m4_t test_vmacc_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t op
   return vmacc_vv_u16m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1564,7 +1408,6 @@ vuint16m4_t test_vmacc_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1,
   return vmacc_vx_u16m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1574,7 +1417,6 @@ vuint16m8_t test_vmacc_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t op
   return vmacc_vv_u16m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1584,7 +1426,6 @@ vuint16m8_t test_vmacc_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1,
   return vmacc_vx_u16m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1594,7 +1435,6 @@ vuint32mf2_t test_vmacc_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf2
   return vmacc_vv_u32mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1604,7 +1444,6 @@ vuint32mf2_t test_vmacc_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t o
   return vmacc_vx_u32mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1614,7 +1453,6 @@ vuint32m1_t test_vmacc_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t o
   return vmacc_vv_u32m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1624,7 +1462,6 @@ vuint32m1_t test_vmacc_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1,
   return vmacc_vx_u32m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1634,7 +1471,6 @@ vuint32m2_t test_vmacc_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t o
   return vmacc_vv_u32m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1644,7 +1480,6 @@ vuint32m2_t test_vmacc_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1,
   return vmacc_vx_u32m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1654,7 +1489,6 @@ vuint32m4_t test_vmacc_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t op
   return vmacc_vv_u32m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1664,7 +1498,6 @@ vuint32m4_t test_vmacc_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1,
   return vmacc_vx_u32m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1674,7 +1507,6 @@ vuint32m8_t test_vmacc_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t op
   return vmacc_vv_u32m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1684,7 +1516,6 @@ vuint32m8_t test_vmacc_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1,
   return vmacc_vx_u32m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1694,7 +1525,6 @@ vuint64m1_t test_vmacc_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t o
   return vmacc_vv_u64m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1704,7 +1534,6 @@ vuint64m1_t test_vmacc_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1,
   return vmacc_vx_u64m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1714,7 +1543,6 @@ vuint64m2_t test_vmacc_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t o
   return vmacc_vv_u64m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1724,7 +1552,6 @@ vuint64m2_t test_vmacc_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1,
   return vmacc_vx_u64m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1734,7 +1561,6 @@ vuint64m4_t test_vmacc_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t o
   return vmacc_vv_u64m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1744,7 +1570,6 @@ vuint64m4_t test_vmacc_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1,
   return vmacc_vx_u64m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1754,7 +1579,6 @@ vuint64m8_t test_vmacc_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t op
   return vmacc_vv_u64m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmadc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmadc.c
index 6ef5fa580eefa..cdfc612234c3f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmadc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmadc.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_i8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vbool64_t test_vmadc_vvm_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2,
   return vmadc_vvm_i8mf8_b64(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_i8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -26,7 +24,6 @@ vbool64_t test_vmadc_vxm_i8mf8_b64(vint8mf8_t op1, int8_t op2,
   return vmadc_vxm_i8mf8_b64(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_i8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -36,7 +33,6 @@ vbool64_t test_vmadc_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
   return vmadc_vv_i8mf8_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_i8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -46,7 +42,6 @@ vbool64_t test_vmadc_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) {
   return vmadc_vx_i8mf8_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_i8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -57,7 +52,6 @@ vbool32_t test_vmadc_vvm_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2,
   return vmadc_vvm_i8mf4_b32(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_i8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -68,7 +62,6 @@ vbool32_t test_vmadc_vxm_i8mf4_b32(vint8mf4_t op1, int8_t op2,
   return vmadc_vxm_i8mf4_b32(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_i8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -78,7 +71,6 @@ vbool32_t test_vmadc_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
   return vmadc_vv_i8mf4_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_i8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -88,7 +80,6 @@ vbool32_t test_vmadc_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) {
   return vmadc_vx_i8mf4_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_i8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -99,7 +90,6 @@ vbool16_t test_vmadc_vvm_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2,
   return vmadc_vvm_i8mf2_b16(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_i8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -110,7 +100,6 @@ vbool16_t test_vmadc_vxm_i8mf2_b16(vint8mf2_t op1, int8_t op2,
   return vmadc_vxm_i8mf2_b16(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_i8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -120,7 +109,6 @@ vbool16_t test_vmadc_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
   return vmadc_vv_i8mf2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_i8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -130,7 +118,6 @@ vbool16_t test_vmadc_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) {
   return vmadc_vx_i8mf2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_i8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -141,7 +128,6 @@ vbool8_t test_vmadc_vvm_i8m1_b8(vint8m1_t op1, vint8m1_t op2, vbool8_t carryin,
   return vmadc_vvm_i8m1_b8(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_i8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -152,7 +138,6 @@ vbool8_t test_vmadc_vxm_i8m1_b8(vint8m1_t op1, int8_t op2, vbool8_t carryin,
   return vmadc_vxm_i8m1_b8(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_i8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -162,7 +147,6 @@ vbool8_t test_vmadc_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) {
   return vmadc_vv_i8m1_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_i8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -172,7 +156,6 @@ vbool8_t test_vmadc_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) {
   return vmadc_vx_i8m1_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_i8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -183,7 +166,6 @@ vbool4_t test_vmadc_vvm_i8m2_b4(vint8m2_t op1, vint8m2_t op2, vbool4_t carryin,
   return vmadc_vvm_i8m2_b4(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_i8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -194,7 +176,6 @@ vbool4_t test_vmadc_vxm_i8m2_b4(vint8m2_t op1, int8_t op2, vbool4_t carryin,
   return vmadc_vxm_i8m2_b4(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_i8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -204,7 +185,6 @@ vbool4_t test_vmadc_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
   return vmadc_vv_i8m2_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_i8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -214,7 +194,6 @@ vbool4_t test_vmadc_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) {
   return vmadc_vx_i8m2_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_i8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -225,7 +204,6 @@ vbool2_t test_vmadc_vvm_i8m4_b2(vint8m4_t op1, vint8m4_t op2, vbool2_t carryin,
   return vmadc_vvm_i8m4_b2(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_i8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -236,7 +214,6 @@ vbool2_t test_vmadc_vxm_i8m4_b2(vint8m4_t op1, int8_t op2, vbool2_t carryin,
   return vmadc_vxm_i8m4_b2(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_i8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -246,7 +223,6 @@ vbool2_t test_vmadc_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) {
   return vmadc_vv_i8m4_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_i8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -256,7 +232,6 @@ vbool2_t test_vmadc_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) {
   return vmadc_vx_i8m4_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_i8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -267,7 +242,6 @@ vbool1_t test_vmadc_vvm_i8m8_b1(vint8m8_t op1, vint8m8_t op2, vbool1_t carryin,
   return vmadc_vvm_i8m8_b1(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_i8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -278,7 +252,6 @@ vbool1_t test_vmadc_vxm_i8m8_b1(vint8m8_t op1, int8_t op2, vbool1_t carryin,
   return vmadc_vxm_i8m8_b1(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_i8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -288,7 +261,6 @@ vbool1_t test_vmadc_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) {
   return vmadc_vv_i8m8_b1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_i8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -298,7 +270,6 @@ vbool1_t test_vmadc_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) {
   return vmadc_vx_i8m8_b1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_i16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -309,7 +280,6 @@ vbool64_t test_vmadc_vvm_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2,
   return vmadc_vvm_i16mf4_b64(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_i16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -320,7 +290,6 @@ vbool64_t test_vmadc_vxm_i16mf4_b64(vint16mf4_t op1, int16_t op2,
   return vmadc_vxm_i16mf4_b64(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_i16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -331,7 +300,6 @@ vbool64_t test_vmadc_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2,
   return vmadc_vv_i16mf4_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_i16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -341,7 +309,6 @@ vbool64_t test_vmadc_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) {
   return vmadc_vx_i16mf4_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_i16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -352,7 +319,6 @@ vbool32_t test_vmadc_vvm_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2,
   return vmadc_vvm_i16mf2_b32(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_i16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -363,7 +329,6 @@ vbool32_t test_vmadc_vxm_i16mf2_b32(vint16mf2_t op1, int16_t op2,
   return vmadc_vxm_i16mf2_b32(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_i16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -374,7 +339,6 @@ vbool32_t test_vmadc_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2,
   return vmadc_vv_i16mf2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_i16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -384,7 +348,6 @@ vbool32_t test_vmadc_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) {
   return vmadc_vx_i16mf2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_i16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -395,7 +358,6 @@ vbool16_t test_vmadc_vvm_i16m1_b16(vint16m1_t op1, vint16m1_t op2,
   return vmadc_vvm_i16m1_b16(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_i16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -406,7 +368,6 @@ vbool16_t test_vmadc_vxm_i16m1_b16(vint16m1_t op1, int16_t op2,
   return vmadc_vxm_i16m1_b16(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_i16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -416,7 +377,6 @@ vbool16_t test_vmadc_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) {
   return vmadc_vv_i16m1_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_i16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -426,7 +386,6 @@ vbool16_t test_vmadc_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) {
   return vmadc_vx_i16m1_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_i16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -437,7 +396,6 @@ vbool8_t test_vmadc_vvm_i16m2_b8(vint16m2_t op1, vint16m2_t op2,
   return vmadc_vvm_i16m2_b8(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_i16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -448,7 +406,6 @@ vbool8_t test_vmadc_vxm_i16m2_b8(vint16m2_t op1, int16_t op2, vbool8_t carryin,
   return vmadc_vxm_i16m2_b8(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_i16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -458,7 +415,6 @@ vbool8_t test_vmadc_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) {
   return vmadc_vv_i16m2_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_i16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -468,7 +424,6 @@ vbool8_t test_vmadc_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) {
   return vmadc_vx_i16m2_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_i16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -479,7 +434,6 @@ vbool4_t test_vmadc_vvm_i16m4_b4(vint16m4_t op1, vint16m4_t op2,
   return vmadc_vvm_i16m4_b4(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_i16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -490,7 +444,6 @@ vbool4_t test_vmadc_vxm_i16m4_b4(vint16m4_t op1, int16_t op2, vbool4_t carryin,
   return vmadc_vxm_i16m4_b4(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_i16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -500,7 +453,6 @@ vbool4_t test_vmadc_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
   return vmadc_vv_i16m4_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_i16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -510,7 +462,6 @@ vbool4_t test_vmadc_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) {
   return vmadc_vx_i16m4_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_i16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -521,7 +472,6 @@ vbool2_t test_vmadc_vvm_i16m8_b2(vint16m8_t op1, vint16m8_t op2,
   return vmadc_vvm_i16m8_b2(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_i16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -532,7 +482,6 @@ vbool2_t test_vmadc_vxm_i16m8_b2(vint16m8_t op1, int16_t op2, vbool2_t carryin,
   return vmadc_vxm_i16m8_b2(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_i16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -542,7 +491,6 @@ vbool2_t test_vmadc_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) {
   return vmadc_vv_i16m8_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_i16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -552,7 +500,6 @@ vbool2_t test_vmadc_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) {
   return vmadc_vx_i16m8_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_i32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -563,7 +510,6 @@ vbool64_t test_vmadc_vvm_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2,
   return vmadc_vvm_i32mf2_b64(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_i32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -574,7 +520,6 @@ vbool64_t test_vmadc_vxm_i32mf2_b64(vint32mf2_t op1, int32_t op2,
   return vmadc_vxm_i32mf2_b64(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_i32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -585,7 +530,6 @@ vbool64_t test_vmadc_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2,
   return vmadc_vv_i32mf2_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_i32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -595,7 +539,6 @@ vbool64_t test_vmadc_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) {
   return vmadc_vx_i32mf2_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_i32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -606,7 +549,6 @@ vbool32_t test_vmadc_vvm_i32m1_b32(vint32m1_t op1, vint32m1_t op2,
   return vmadc_vvm_i32m1_b32(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_i32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -617,7 +559,6 @@ vbool32_t test_vmadc_vxm_i32m1_b32(vint32m1_t op1, int32_t op2,
   return vmadc_vxm_i32m1_b32(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_i32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -627,7 +568,6 @@ vbool32_t test_vmadc_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) {
   return vmadc_vv_i32m1_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_i32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -637,7 +577,6 @@ vbool32_t test_vmadc_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) {
   return vmadc_vx_i32m1_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_i32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -648,7 +587,6 @@ vbool16_t test_vmadc_vvm_i32m2_b16(vint32m2_t op1, vint32m2_t op2,
   return vmadc_vvm_i32m2_b16(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_i32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -659,7 +597,6 @@ vbool16_t test_vmadc_vxm_i32m2_b16(vint32m2_t op1, int32_t op2,
   return vmadc_vxm_i32m2_b16(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_i32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -669,7 +606,6 @@ vbool16_t test_vmadc_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) {
   return vmadc_vv_i32m2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_i32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -679,7 +615,6 @@ vbool16_t test_vmadc_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) {
   return vmadc_vx_i32m2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_i32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -690,7 +625,6 @@ vbool8_t test_vmadc_vvm_i32m4_b8(vint32m4_t op1, vint32m4_t op2,
   return vmadc_vvm_i32m4_b8(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_i32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -701,7 +635,6 @@ vbool8_t test_vmadc_vxm_i32m4_b8(vint32m4_t op1, int32_t op2, vbool8_t carryin,
   return vmadc_vxm_i32m4_b8(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_i32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -711,7 +644,6 @@ vbool8_t test_vmadc_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
   return vmadc_vv_i32m4_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_i32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -721,7 +653,6 @@ vbool8_t test_vmadc_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) {
   return vmadc_vx_i32m4_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_i32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -732,7 +663,6 @@ vbool4_t test_vmadc_vvm_i32m8_b4(vint32m8_t op1, vint32m8_t op2,
   return vmadc_vvm_i32m8_b4(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_i32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -743,7 +673,6 @@ vbool4_t test_vmadc_vxm_i32m8_b4(vint32m8_t op1, int32_t op2, vbool4_t carryin,
   return vmadc_vxm_i32m8_b4(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_i32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -753,7 +682,6 @@ vbool4_t test_vmadc_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) {
   return vmadc_vv_i32m8_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_i32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -763,7 +691,6 @@ vbool4_t test_vmadc_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) {
   return vmadc_vx_i32m8_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_i64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -774,7 +701,6 @@ vbool64_t test_vmadc_vvm_i64m1_b64(vint64m1_t op1, vint64m1_t op2,
   return vmadc_vvm_i64m1_b64(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_i64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -785,7 +711,6 @@ vbool64_t test_vmadc_vxm_i64m1_b64(vint64m1_t op1, int64_t op2,
   return vmadc_vxm_i64m1_b64(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_i64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -795,7 +720,6 @@ vbool64_t test_vmadc_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) {
   return vmadc_vv_i64m1_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_i64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -805,7 +729,6 @@ vbool64_t test_vmadc_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) {
   return vmadc_vx_i64m1_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_i64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -816,7 +739,6 @@ vbool32_t test_vmadc_vvm_i64m2_b32(vint64m2_t op1, vint64m2_t op2,
   return vmadc_vvm_i64m2_b32(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_i64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -827,7 +749,6 @@ vbool32_t test_vmadc_vxm_i64m2_b32(vint64m2_t op1, int64_t op2,
   return vmadc_vxm_i64m2_b32(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_i64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -837,7 +758,6 @@ vbool32_t test_vmadc_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) {
   return vmadc_vv_i64m2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_i64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -847,7 +767,6 @@ vbool32_t test_vmadc_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) {
   return vmadc_vx_i64m2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_i64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -858,7 +777,6 @@ vbool16_t test_vmadc_vvm_i64m4_b16(vint64m4_t op1, vint64m4_t op2,
   return vmadc_vvm_i64m4_b16(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_i64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -869,7 +787,6 @@ vbool16_t test_vmadc_vxm_i64m4_b16(vint64m4_t op1, int64_t op2,
   return vmadc_vxm_i64m4_b16(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_i64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -879,7 +796,6 @@ vbool16_t test_vmadc_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) {
   return vmadc_vv_i64m4_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_i64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -889,7 +805,6 @@ vbool16_t test_vmadc_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) {
   return vmadc_vx_i64m4_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_i64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -900,7 +815,6 @@ vbool8_t test_vmadc_vvm_i64m8_b8(vint64m8_t op1, vint64m8_t op2,
   return vmadc_vvm_i64m8_b8(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_i64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -911,7 +825,6 @@ vbool8_t test_vmadc_vxm_i64m8_b8(vint64m8_t op1, int64_t op2, vbool8_t carryin,
   return vmadc_vxm_i64m8_b8(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_i64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -921,7 +834,6 @@ vbool8_t test_vmadc_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
   return vmadc_vv_i64m8_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_i64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -931,7 +843,6 @@ vbool8_t test_vmadc_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) {
   return vmadc_vx_i64m8_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_u8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -942,7 +853,6 @@ vbool64_t test_vmadc_vvm_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2,
   return vmadc_vvm_u8mf8_b64(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_u8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -953,7 +863,6 @@ vbool64_t test_vmadc_vxm_u8mf8_b64(vuint8mf8_t op1, uint8_t op2,
   return vmadc_vxm_u8mf8_b64(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_u8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -963,7 +872,6 @@ vbool64_t test_vmadc_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
   return vmadc_vv_u8mf8_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_u8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -973,7 +881,6 @@ vbool64_t test_vmadc_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) {
   return vmadc_vx_u8mf8_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_u8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -984,7 +891,6 @@ vbool32_t test_vmadc_vvm_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2,
   return vmadc_vvm_u8mf4_b32(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_u8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -995,7 +901,6 @@ vbool32_t test_vmadc_vxm_u8mf4_b32(vuint8mf4_t op1, uint8_t op2,
   return vmadc_vxm_u8mf4_b32(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_u8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1005,7 +910,6 @@ vbool32_t test_vmadc_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
   return vmadc_vv_u8mf4_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_u8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1015,7 +919,6 @@ vbool32_t test_vmadc_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) {
   return vmadc_vx_u8mf4_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_u8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1026,7 +929,6 @@ vbool16_t test_vmadc_vvm_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2,
   return vmadc_vvm_u8mf2_b16(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_u8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1037,7 +939,6 @@ vbool16_t test_vmadc_vxm_u8mf2_b16(vuint8mf2_t op1, uint8_t op2,
   return vmadc_vxm_u8mf2_b16(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_u8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1047,7 +948,6 @@ vbool16_t test_vmadc_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
   return vmadc_vv_u8mf2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_u8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1057,7 +957,6 @@ vbool16_t test_vmadc_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) {
   return vmadc_vx_u8mf2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_u8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1068,7 +967,6 @@ vbool8_t test_vmadc_vvm_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2,
   return vmadc_vvm_u8m1_b8(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_u8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1079,7 +977,6 @@ vbool8_t test_vmadc_vxm_u8m1_b8(vuint8m1_t op1, uint8_t op2, vbool8_t carryin,
   return vmadc_vxm_u8m1_b8(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_u8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1089,7 +986,6 @@ vbool8_t test_vmadc_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vmadc_vv_u8m1_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_u8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1099,7 +995,6 @@ vbool8_t test_vmadc_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) {
   return vmadc_vx_u8m1_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_u8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1110,7 +1005,6 @@ vbool4_t test_vmadc_vvm_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2,
   return vmadc_vvm_u8m2_b4(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_u8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1121,7 +1015,6 @@ vbool4_t test_vmadc_vxm_u8m2_b4(vuint8m2_t op1, uint8_t op2, vbool4_t carryin,
   return vmadc_vxm_u8m2_b4(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_u8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1131,7 +1024,6 @@ vbool4_t test_vmadc_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vmadc_vv_u8m2_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_u8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1141,7 +1033,6 @@ vbool4_t test_vmadc_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) {
   return vmadc_vx_u8m2_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_u8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1152,7 +1043,6 @@ vbool2_t test_vmadc_vvm_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2,
   return vmadc_vvm_u8m4_b2(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_u8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1163,7 +1053,6 @@ vbool2_t test_vmadc_vxm_u8m4_b2(vuint8m4_t op1, uint8_t op2, vbool2_t carryin,
   return vmadc_vxm_u8m4_b2(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_u8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1173,7 +1062,6 @@ vbool2_t test_vmadc_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vmadc_vv_u8m4_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_u8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1183,7 +1071,6 @@ vbool2_t test_vmadc_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) {
   return vmadc_vx_u8m4_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_u8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1194,7 +1081,6 @@ vbool1_t test_vmadc_vvm_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2,
   return vmadc_vvm_u8m8_b1(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_u8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1205,7 +1091,6 @@ vbool1_t test_vmadc_vxm_u8m8_b1(vuint8m8_t op1, uint8_t op2, vbool1_t carryin,
   return vmadc_vxm_u8m8_b1(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_u8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1215,7 +1100,6 @@ vbool1_t test_vmadc_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
   return vmadc_vv_u8m8_b1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_u8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1225,7 +1109,6 @@ vbool1_t test_vmadc_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) {
   return vmadc_vx_u8m8_b1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_u16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1236,7 +1119,6 @@ vbool64_t test_vmadc_vvm_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2,
   return vmadc_vvm_u16mf4_b64(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_u16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1247,7 +1129,6 @@ vbool64_t test_vmadc_vxm_u16mf4_b64(vuint16mf4_t op1, uint16_t op2,
   return vmadc_vxm_u16mf4_b64(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_u16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1258,7 +1139,6 @@ vbool64_t test_vmadc_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2,
   return vmadc_vv_u16mf4_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_u16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1268,7 +1148,6 @@ vbool64_t test_vmadc_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) {
   return vmadc_vx_u16mf4_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_u16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1279,7 +1158,6 @@ vbool32_t test_vmadc_vvm_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2,
   return vmadc_vvm_u16mf2_b32(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_u16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1290,7 +1168,6 @@ vbool32_t test_vmadc_vxm_u16mf2_b32(vuint16mf2_t op1, uint16_t op2,
   return vmadc_vxm_u16mf2_b32(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_u16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1301,7 +1178,6 @@ vbool32_t test_vmadc_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2,
   return vmadc_vv_u16mf2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_u16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1311,7 +1187,6 @@ vbool32_t test_vmadc_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) {
   return vmadc_vx_u16mf2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_u16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1322,7 +1197,6 @@ vbool16_t test_vmadc_vvm_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2,
   return vmadc_vvm_u16m1_b16(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_u16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1333,7 +1207,6 @@ vbool16_t test_vmadc_vxm_u16m1_b16(vuint16m1_t op1, uint16_t op2,
   return vmadc_vxm_u16m1_b16(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_u16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1343,7 +1216,6 @@ vbool16_t test_vmadc_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
   return vmadc_vv_u16m1_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_u16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1353,7 +1225,6 @@ vbool16_t test_vmadc_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) {
   return vmadc_vx_u16m1_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_u16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1364,7 +1235,6 @@ vbool8_t test_vmadc_vvm_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2,
   return vmadc_vvm_u16m2_b8(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_u16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1375,7 +1245,6 @@ vbool8_t test_vmadc_vxm_u16m2_b8(vuint16m2_t op1, uint16_t op2,
   return vmadc_vxm_u16m2_b8(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_u16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1385,7 +1254,6 @@ vbool8_t test_vmadc_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vmadc_vv_u16m2_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_u16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1395,7 +1263,6 @@ vbool8_t test_vmadc_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) {
   return vmadc_vx_u16m2_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_u16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1406,7 +1273,6 @@ vbool4_t test_vmadc_vvm_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2,
   return vmadc_vvm_u16m4_b4(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_u16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1417,7 +1283,6 @@ vbool4_t test_vmadc_vxm_u16m4_b4(vuint16m4_t op1, uint16_t op2,
   return vmadc_vxm_u16m4_b4(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_u16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1427,7 +1292,6 @@ vbool4_t test_vmadc_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vmadc_vv_u16m4_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_u16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1437,7 +1301,6 @@ vbool4_t test_vmadc_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) {
   return vmadc_vx_u16m4_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_u16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1448,7 +1311,6 @@ vbool2_t test_vmadc_vvm_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2,
   return vmadc_vvm_u16m8_b2(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_u16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1459,7 +1321,6 @@ vbool2_t test_vmadc_vxm_u16m8_b2(vuint16m8_t op1, uint16_t op2,
   return vmadc_vxm_u16m8_b2(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_u16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1469,7 +1330,6 @@ vbool2_t test_vmadc_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return vmadc_vv_u16m8_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_u16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1479,7 +1339,6 @@ vbool2_t test_vmadc_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) {
   return vmadc_vx_u16m8_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_u32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1490,7 +1349,6 @@ vbool64_t test_vmadc_vvm_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2,
   return vmadc_vvm_u32mf2_b64(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_u32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1501,7 +1359,6 @@ vbool64_t test_vmadc_vxm_u32mf2_b64(vuint32mf2_t op1, uint32_t op2,
   return vmadc_vxm_u32mf2_b64(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_u32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1512,7 +1369,6 @@ vbool64_t test_vmadc_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2,
   return vmadc_vv_u32mf2_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_u32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1522,7 +1378,6 @@ vbool64_t test_vmadc_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) {
   return vmadc_vx_u32mf2_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_u32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1533,7 +1388,6 @@ vbool32_t test_vmadc_vvm_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2,
   return vmadc_vvm_u32m1_b32(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_u32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1544,7 +1398,6 @@ vbool32_t test_vmadc_vxm_u32m1_b32(vuint32m1_t op1, uint32_t op2,
   return vmadc_vxm_u32m1_b32(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_u32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1554,7 +1407,6 @@ vbool32_t test_vmadc_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
   return vmadc_vv_u32m1_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_u32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1564,7 +1416,6 @@ vbool32_t test_vmadc_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) {
   return vmadc_vx_u32m1_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_u32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1575,7 +1426,6 @@ vbool16_t test_vmadc_vvm_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2,
   return vmadc_vvm_u32m2_b16(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_u32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1586,7 +1436,6 @@ vbool16_t test_vmadc_vxm_u32m2_b16(vuint32m2_t op1, uint32_t op2,
   return vmadc_vxm_u32m2_b16(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_u32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1596,7 +1445,6 @@ vbool16_t test_vmadc_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
   return vmadc_vv_u32m2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_u32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1606,7 +1454,6 @@ vbool16_t test_vmadc_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vmadc_vx_u32m2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_u32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1617,7 +1464,6 @@ vbool8_t test_vmadc_vvm_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2,
   return vmadc_vvm_u32m4_b8(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_u32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1628,7 +1474,6 @@ vbool8_t test_vmadc_vxm_u32m4_b8(vuint32m4_t op1, uint32_t op2,
   return vmadc_vxm_u32m4_b8(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_u32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1638,7 +1483,6 @@ vbool8_t test_vmadc_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vmadc_vv_u32m4_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_u32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1648,7 +1492,6 @@ vbool8_t test_vmadc_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vmadc_vx_u32m4_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_u32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1659,7 +1502,6 @@ vbool4_t test_vmadc_vvm_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2,
   return vmadc_vvm_u32m8_b4(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_u32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1670,7 +1512,6 @@ vbool4_t test_vmadc_vxm_u32m8_b4(vuint32m8_t op1, uint32_t op2,
   return vmadc_vxm_u32m8_b4(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_u32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1680,7 +1521,6 @@ vbool4_t test_vmadc_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
   return vmadc_vv_u32m8_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_u32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1690,7 +1530,6 @@ vbool4_t test_vmadc_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) {
   return vmadc_vx_u32m8_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_u64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1701,7 +1540,6 @@ vbool64_t test_vmadc_vvm_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2,
   return vmadc_vvm_u64m1_b64(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_u64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1712,7 +1550,6 @@ vbool64_t test_vmadc_vxm_u64m1_b64(vuint64m1_t op1, uint64_t op2,
   return vmadc_vxm_u64m1_b64(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_u64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1722,7 +1559,6 @@ vbool64_t test_vmadc_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
   return vmadc_vv_u64m1_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_u64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1732,7 +1568,6 @@ vbool64_t test_vmadc_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) {
   return vmadc_vx_u64m1_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_u64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1743,7 +1578,6 @@ vbool32_t test_vmadc_vvm_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2,
   return vmadc_vvm_u64m2_b32(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_u64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1754,7 +1588,6 @@ vbool32_t test_vmadc_vxm_u64m2_b32(vuint64m2_t op1, uint64_t op2,
   return vmadc_vxm_u64m2_b32(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_u64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1764,7 +1597,6 @@ vbool32_t test_vmadc_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
   return vmadc_vv_u64m2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_u64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1774,7 +1606,6 @@ vbool32_t test_vmadc_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) {
   return vmadc_vx_u64m2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_u64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1785,7 +1616,6 @@ vbool16_t test_vmadc_vvm_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2,
   return vmadc_vvm_u64m4_b16(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_u64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1796,7 +1626,6 @@ vbool16_t test_vmadc_vxm_u64m4_b16(vuint64m4_t op1, uint64_t op2,
   return vmadc_vxm_u64m4_b16(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_u64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1806,7 +1635,6 @@ vbool16_t test_vmadc_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
   return vmadc_vv_u64m4_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_u64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1816,7 +1644,6 @@ vbool16_t test_vmadc_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) {
   return vmadc_vx_u64m4_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_u64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1827,7 +1654,6 @@ vbool8_t test_vmadc_vvm_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2,
   return vmadc_vvm_u64m8_b8(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_u64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1838,7 +1664,6 @@ vbool8_t test_vmadc_vxm_u64m8_b8(vuint64m8_t op1, uint64_t op2,
   return vmadc_vxm_u64m8_b8(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_u64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1848,7 +1673,6 @@ vbool8_t test_vmadc_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
   return vmadc_vv_u64m8_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_u64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmadd.c
index c2d044a042c4e..7157c8e45003a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmadd.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint8mf8_t test_vmadd_vv_i8mf8(vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, s
   return vmadd_vv_i8mf8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint8mf8_t test_vmadd_vx_i8mf8(vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_
   return vmadd_vx_i8mf8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint8mf4_t test_vmadd_vv_i8mf4(vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, s
   return vmadd_vv_i8mf4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint8mf4_t test_vmadd_vx_i8mf4(vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_
   return vmadd_vx_i8mf4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint8mf2_t test_vmadd_vv_i8mf2(vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, s
   return vmadd_vv_i8mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint8mf2_t test_vmadd_vx_i8mf2(vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_
   return vmadd_vx_i8mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vint8m1_t test_vmadd_vv_i8m1(vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t
   return vmadd_vv_i8m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vint8m1_t test_vmadd_vx_i8m1(vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl
   return vmadd_vx_i8m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vint8m2_t test_vmadd_vv_i8m2(vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t
   return vmadd_vv_i8m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vint8m2_t test_vmadd_vx_i8m2(vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl
   return vmadd_vx_i8m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vint8m4_t test_vmadd_vv_i8m4(vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t
   return vmadd_vv_i8m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vint8m4_t test_vmadd_vx_i8m4(vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl
   return vmadd_vx_i8m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vint8m8_t test_vmadd_vv_i8m8(vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t
   return vmadd_vv_i8m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vint8m8_t test_vmadd_vx_i8m8(vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl
   return vmadd_vx_i8m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vint16mf4_t test_vmadd_vv_i16mf4(vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t o
   return vmadd_vv_i16mf4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vint16mf4_t test_vmadd_vx_i16mf4(vint16mf4_t acc, int16_t op1, vint16mf4_t op2,
   return vmadd_vx_i16mf4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vint16mf2_t test_vmadd_vv_i16mf2(vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t o
   return vmadd_vv_i16mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vint16mf2_t test_vmadd_vx_i16mf2(vint16mf2_t acc, int16_t op1, vint16mf2_t op2,
   return vmadd_vx_i16mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vint16m1_t test_vmadd_vv_i16m1(vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, s
   return vmadd_vv_i16m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vint16m1_t test_vmadd_vx_i16m1(vint16m1_t acc, int16_t op1, vint16m1_t op2, size
   return vmadd_vx_i16m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vint16m2_t test_vmadd_vv_i16m2(vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, s
   return vmadd_vv_i16m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vint16m2_t test_vmadd_vx_i16m2(vint16m2_t acc, int16_t op1, vint16m2_t op2, size
   return vmadd_vx_i16m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vint16m4_t test_vmadd_vv_i16m4(vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, s
   return vmadd_vv_i16m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vint16m4_t test_vmadd_vx_i16m4(vint16m4_t acc, int16_t op1, vint16m4_t op2, size
   return vmadd_vx_i16m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vint16m8_t test_vmadd_vv_i16m8(vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, s
   return vmadd_vv_i16m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vint16m8_t test_vmadd_vx_i16m8(vint16m8_t acc, int16_t op1, vint16m8_t op2, size
   return vmadd_vx_i16m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vint32mf2_t test_vmadd_vv_i32mf2(vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t o
   return vmadd_vv_i32mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vint32mf2_t test_vmadd_vx_i32mf2(vint32mf2_t acc, int32_t op1, vint32mf2_t op2,
   return vmadd_vx_i32mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vint32m1_t test_vmadd_vv_i32m1(vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, s
   return vmadd_vv_i32m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -304,7 +274,6 @@ vint32m1_t test_vmadd_vx_i32m1(vint32m1_t acc, int32_t op1, vint32m1_t op2, size
   return vmadd_vx_i32m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -314,7 +283,6 @@ vint32m2_t test_vmadd_vv_i32m2(vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, s
   return vmadd_vv_i32m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -324,7 +292,6 @@ vint32m2_t test_vmadd_vx_i32m2(vint32m2_t acc, int32_t op1, vint32m2_t op2, size
   return vmadd_vx_i32m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -334,7 +301,6 @@ vint32m4_t test_vmadd_vv_i32m4(vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, s
   return vmadd_vv_i32m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -344,7 +310,6 @@ vint32m4_t test_vmadd_vx_i32m4(vint32m4_t acc, int32_t op1, vint32m4_t op2, size
   return vmadd_vx_i32m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -354,7 +319,6 @@ vint32m8_t test_vmadd_vv_i32m8(vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, s
   return vmadd_vv_i32m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -364,7 +328,6 @@ vint32m8_t test_vmadd_vx_i32m8(vint32m8_t acc, int32_t op1, vint32m8_t op2, size
   return vmadd_vx_i32m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -374,7 +337,6 @@ vint64m1_t test_vmadd_vv_i64m1(vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, s
   return vmadd_vv_i64m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -384,7 +346,6 @@ vint64m1_t test_vmadd_vx_i64m1(vint64m1_t acc, int64_t op1, vint64m1_t op2, size
   return vmadd_vx_i64m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -394,7 +355,6 @@ vint64m2_t test_vmadd_vv_i64m2(vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, s
   return vmadd_vv_i64m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -404,7 +364,6 @@ vint64m2_t test_vmadd_vx_i64m2(vint64m2_t acc, int64_t op1, vint64m2_t op2, size
   return vmadd_vx_i64m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -414,7 +373,6 @@ vint64m4_t test_vmadd_vv_i64m4(vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, s
   return vmadd_vv_i64m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -424,7 +382,6 @@ vint64m4_t test_vmadd_vx_i64m4(vint64m4_t acc, int64_t op1, vint64m4_t op2, size
   return vmadd_vx_i64m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -434,7 +391,6 @@ vint64m8_t test_vmadd_vv_i64m8(vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, s
   return vmadd_vv_i64m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -444,7 +400,6 @@ vint64m8_t test_vmadd_vx_i64m8(vint64m8_t acc, int64_t op1, vint64m8_t op2, size
   return vmadd_vx_i64m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -454,7 +409,6 @@ vuint8mf8_t test_vmadd_vv_u8mf8(vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op
   return vmadd_vv_u8mf8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -464,7 +418,6 @@ vuint8mf8_t test_vmadd_vx_u8mf8(vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, s
   return vmadd_vx_u8mf8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -474,7 +427,6 @@ vuint8mf4_t test_vmadd_vv_u8mf4(vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op
   return vmadd_vv_u8mf4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -484,7 +436,6 @@ vuint8mf4_t test_vmadd_vx_u8mf4(vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, s
   return vmadd_vx_u8mf4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -494,7 +445,6 @@ vuint8mf2_t test_vmadd_vv_u8mf2(vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op
   return vmadd_vv_u8mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -504,7 +454,6 @@ vuint8mf2_t test_vmadd_vx_u8mf2(vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, s
   return vmadd_vx_u8mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -514,7 +463,6 @@ vuint8m1_t test_vmadd_vv_u8m1(vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, si
   return vmadd_vv_u8m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -524,7 +472,6 @@ vuint8m1_t test_vmadd_vx_u8m1(vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_
   return vmadd_vx_u8m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -534,7 +481,6 @@ vuint8m2_t test_vmadd_vv_u8m2(vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, si
   return vmadd_vv_u8m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -544,7 +490,6 @@ vuint8m2_t test_vmadd_vx_u8m2(vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_
   return vmadd_vx_u8m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -554,7 +499,6 @@ vuint8m4_t test_vmadd_vv_u8m4(vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, si
   return vmadd_vv_u8m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -564,7 +508,6 @@ vuint8m4_t test_vmadd_vx_u8m4(vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_
   return vmadd_vx_u8m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -574,7 +517,6 @@ vuint8m8_t test_vmadd_vv_u8m8(vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, si
   return vmadd_vv_u8m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -584,7 +526,6 @@ vuint8m8_t test_vmadd_vx_u8m8(vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_
   return vmadd_vx_u8m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -594,7 +535,6 @@ vuint16mf4_t test_vmadd_vv_u16mf4(vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4
   return vmadd_vv_u16mf4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -604,7 +544,6 @@ vuint16mf4_t test_vmadd_vx_u16mf4(vuint16mf4_t acc, uint16_t op1, vuint16mf4_t o
   return vmadd_vx_u16mf4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -614,7 +553,6 @@ vuint16mf2_t test_vmadd_vv_u16mf2(vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2
   return vmadd_vv_u16mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -624,7 +562,6 @@ vuint16mf2_t test_vmadd_vx_u16mf2(vuint16mf2_t acc, uint16_t op1, vuint16mf2_t o
   return vmadd_vx_u16mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -634,7 +571,6 @@ vuint16m1_t test_vmadd_vv_u16m1(vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op
   return vmadd_vv_u16m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -644,7 +580,6 @@ vuint16m1_t test_vmadd_vx_u16m1(vuint16m1_t acc, uint16_t op1, vuint16m1_t op2,
   return vmadd_vx_u16m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -654,7 +589,6 @@ vuint16m2_t test_vmadd_vv_u16m2(vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op
   return vmadd_vv_u16m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -664,7 +598,6 @@ vuint16m2_t test_vmadd_vx_u16m2(vuint16m2_t acc, uint16_t op1, vuint16m2_t op2,
   return vmadd_vx_u16m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -674,7 +607,6 @@ vuint16m4_t test_vmadd_vv_u16m4(vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op
   return vmadd_vv_u16m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -684,7 +616,6 @@ vuint16m4_t test_vmadd_vx_u16m4(vuint16m4_t acc, uint16_t op1, vuint16m4_t op2,
   return vmadd_vx_u16m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -694,7 +625,6 @@ vuint16m8_t test_vmadd_vv_u16m8(vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op
   return vmadd_vv_u16m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -704,7 +634,6 @@ vuint16m8_t test_vmadd_vx_u16m8(vuint16m8_t acc, uint16_t op1, vuint16m8_t op2,
   return vmadd_vx_u16m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -714,7 +643,6 @@ vuint32mf2_t test_vmadd_vv_u32mf2(vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2
   return vmadd_vv_u32mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -724,7 +652,6 @@ vuint32mf2_t test_vmadd_vx_u32mf2(vuint32mf2_t acc, uint32_t op1, vuint32mf2_t o
   return vmadd_vx_u32mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -734,7 +661,6 @@ vuint32m1_t test_vmadd_vv_u32m1(vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op
   return vmadd_vv_u32m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -744,7 +670,6 @@ vuint32m1_t test_vmadd_vx_u32m1(vuint32m1_t acc, uint32_t op1, vuint32m1_t op2,
   return vmadd_vx_u32m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -754,7 +679,6 @@ vuint32m2_t test_vmadd_vv_u32m2(vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op
   return vmadd_vv_u32m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -764,7 +688,6 @@ vuint32m2_t test_vmadd_vx_u32m2(vuint32m2_t acc, uint32_t op1, vuint32m2_t op2,
   return vmadd_vx_u32m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -774,7 +697,6 @@ vuint32m4_t test_vmadd_vv_u32m4(vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op
   return vmadd_vv_u32m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -784,7 +706,6 @@ vuint32m4_t test_vmadd_vx_u32m4(vuint32m4_t acc, uint32_t op1, vuint32m4_t op2,
   return vmadd_vx_u32m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -794,7 +715,6 @@ vuint32m8_t test_vmadd_vv_u32m8(vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op
   return vmadd_vv_u32m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -804,7 +724,6 @@ vuint32m8_t test_vmadd_vx_u32m8(vuint32m8_t acc, uint32_t op1, vuint32m8_t op2,
   return vmadd_vx_u32m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -814,7 +733,6 @@ vuint64m1_t test_vmadd_vv_u64m1(vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op
   return vmadd_vv_u64m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -824,7 +742,6 @@ vuint64m1_t test_vmadd_vx_u64m1(vuint64m1_t acc, uint64_t op1, vuint64m1_t op2,
   return vmadd_vx_u64m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -834,7 +751,6 @@ vuint64m2_t test_vmadd_vv_u64m2(vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op
   return vmadd_vv_u64m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -844,7 +760,6 @@ vuint64m2_t test_vmadd_vx_u64m2(vuint64m2_t acc, uint64_t op1, vuint64m2_t op2,
   return vmadd_vx_u64m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -854,7 +769,6 @@ vuint64m4_t test_vmadd_vv_u64m4(vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op
   return vmadd_vv_u64m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -864,7 +778,6 @@ vuint64m4_t test_vmadd_vx_u64m4(vuint64m4_t acc, uint64_t op1, vuint64m4_t op2,
   return vmadd_vx_u64m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -874,7 +787,6 @@ vuint64m8_t test_vmadd_vv_u64m8(vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op
   return vmadd_vv_u64m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -884,7 +796,6 @@ vuint64m8_t test_vmadd_vx_u64m8(vuint64m8_t acc, uint64_t op1, vuint64m8_t op2,
   return vmadd_vx_u64m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -894,7 +805,6 @@ vint8mf8_t test_vmadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1,
   return vmadd_vv_i8mf8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -904,7 +814,6 @@ vint8mf8_t test_vmadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vin
   return vmadd_vx_i8mf8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -914,7 +823,6 @@ vint8mf4_t test_vmadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1,
   return vmadd_vv_i8mf4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -924,7 +832,6 @@ vint8mf4_t test_vmadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vin
   return vmadd_vx_i8mf4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -934,7 +841,6 @@ vint8mf2_t test_vmadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1,
   return vmadd_vv_i8mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -944,7 +850,6 @@ vint8mf2_t test_vmadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vin
   return vmadd_vx_i8mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -954,7 +859,6 @@ vint8m1_t test_vmadd_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vint
   return vmadd_vv_i8m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -964,7 +868,6 @@ vint8m1_t test_vmadd_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m1
   return vmadd_vx_i8m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -974,7 +877,6 @@ vint8m2_t test_vmadd_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vint
   return vmadd_vv_i8m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -984,7 +886,6 @@ vint8m2_t test_vmadd_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m2
   return vmadd_vx_i8m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -994,7 +895,6 @@ vint8m4_t test_vmadd_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vint
   return vmadd_vv_i8m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1004,7 +904,6 @@ vint8m4_t test_vmadd_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m4
   return vmadd_vx_i8m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1014,7 +913,6 @@ vint8m8_t test_vmadd_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vint
   return vmadd_vv_i8m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1024,7 +922,6 @@ vint8m8_t test_vmadd_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m8
   return vmadd_vx_i8m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1034,7 +931,6 @@ vint16mf4_t test_vmadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t
   return vmadd_vv_i16mf4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1044,7 +940,6 @@ vint16mf4_t test_vmadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1,
   return vmadd_vx_i16mf4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1054,7 +949,6 @@ vint16mf2_t test_vmadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t
   return vmadd_vv_i16mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1064,7 +958,6 @@ vint16mf2_t test_vmadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1,
   return vmadd_vx_i16mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1074,7 +967,6 @@ vint16m1_t test_vmadd_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1,
   return vmadd_vv_i16m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1084,7 +976,6 @@ vint16m1_t test_vmadd_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, vi
   return vmadd_vx_i16m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1094,7 +985,6 @@ vint16m2_t test_vmadd_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1,
   return vmadd_vv_i16m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1104,7 +994,6 @@ vint16m2_t test_vmadd_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vin
   return vmadd_vx_i16m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1114,7 +1003,6 @@ vint16m4_t test_vmadd_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1,
   return vmadd_vv_i16m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1124,7 +1012,6 @@ vint16m4_t test_vmadd_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vin
   return vmadd_vx_i16m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1134,7 +1021,6 @@ vint16m8_t test_vmadd_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1,
   return vmadd_vv_i16m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1144,7 +1030,6 @@ vint16m8_t test_vmadd_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vin
   return vmadd_vx_i16m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1154,7 +1039,6 @@ vint32mf2_t test_vmadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t
   return vmadd_vv_i32mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1164,7 +1048,6 @@ vint32mf2_t test_vmadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1,
   return vmadd_vx_i32mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1174,7 +1057,6 @@ vint32m1_t test_vmadd_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1,
   return vmadd_vv_i32m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1184,7 +1066,6 @@ vint32m1_t test_vmadd_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, vi
   return vmadd_vx_i32m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1194,7 +1075,6 @@ vint32m2_t test_vmadd_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1,
   return vmadd_vv_i32m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1204,7 +1084,6 @@ vint32m2_t test_vmadd_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, vi
   return vmadd_vx_i32m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1214,7 +1093,6 @@ vint32m4_t test_vmadd_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1,
   return vmadd_vv_i32m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1224,7 +1102,6 @@ vint32m4_t test_vmadd_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vin
   return vmadd_vx_i32m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1234,7 +1111,6 @@ vint32m8_t test_vmadd_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1,
   return vmadd_vv_i32m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1244,7 +1120,6 @@ vint32m8_t test_vmadd_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vin
   return vmadd_vx_i32m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1254,7 +1129,6 @@ vint64m1_t test_vmadd_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1,
   return vmadd_vv_i64m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1264,7 +1138,6 @@ vint64m1_t test_vmadd_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, vi
   return vmadd_vx_i64m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1274,7 +1147,6 @@ vint64m2_t test_vmadd_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1,
   return vmadd_vv_i64m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1284,7 +1156,6 @@ vint64m2_t test_vmadd_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, vi
   return vmadd_vx_i64m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1294,7 +1165,6 @@ vint64m4_t test_vmadd_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1,
   return vmadd_vv_i64m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1304,7 +1174,6 @@ vint64m4_t test_vmadd_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, vi
   return vmadd_vx_i64m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1314,7 +1183,6 @@ vint64m8_t test_vmadd_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1,
   return vmadd_vv_i64m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1324,7 +1192,6 @@ vint64m8_t test_vmadd_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vin
   return vmadd_vx_i64m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1334,7 +1201,6 @@ vuint8mf8_t test_vmadd_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t o
   return vmadd_vv_u8mf8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1344,7 +1210,6 @@ vuint8mf8_t test_vmadd_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1,
   return vmadd_vx_u8mf8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1354,7 +1219,6 @@ vuint8mf4_t test_vmadd_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t o
   return vmadd_vv_u8mf4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1364,7 +1228,6 @@ vuint8mf4_t test_vmadd_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1,
   return vmadd_vx_u8mf4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1374,7 +1237,6 @@ vuint8mf2_t test_vmadd_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t o
   return vmadd_vv_u8mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1384,7 +1246,6 @@ vuint8mf2_t test_vmadd_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1,
   return vmadd_vx_u8mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1394,7 +1255,6 @@ vuint8m1_t test_vmadd_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1, v
   return vmadd_vv_u8m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1404,7 +1264,6 @@ vuint8m1_t test_vmadd_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vuin
   return vmadd_vx_u8m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1414,7 +1273,6 @@ vuint8m2_t test_vmadd_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1, v
   return vmadd_vv_u8m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1424,7 +1282,6 @@ vuint8m2_t test_vmadd_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vuin
   return vmadd_vx_u8m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1434,7 +1291,6 @@ vuint8m4_t test_vmadd_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1, v
   return vmadd_vv_u8m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1444,7 +1300,6 @@ vuint8m4_t test_vmadd_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vuin
   return vmadd_vx_u8m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1454,7 +1309,6 @@ vuint8m8_t test_vmadd_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1, v
   return vmadd_vv_u8m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1464,7 +1318,6 @@ vuint8m8_t test_vmadd_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vuin
   return vmadd_vx_u8m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1474,7 +1327,6 @@ vuint16mf4_t test_vmadd_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf4
   return vmadd_vv_u16mf4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1484,7 +1336,6 @@ vuint16mf4_t test_vmadd_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t o
   return vmadd_vx_u16mf4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1494,7 +1345,6 @@ vuint16mf2_t test_vmadd_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf2
   return vmadd_vv_u16mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1504,7 +1354,6 @@ vuint16mf2_t test_vmadd_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t o
   return vmadd_vx_u16mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1514,7 +1363,6 @@ vuint16m1_t test_vmadd_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t o
   return vmadd_vv_u16m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1524,7 +1372,6 @@ vuint16m1_t test_vmadd_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1,
   return vmadd_vx_u16m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1534,7 +1381,6 @@ vuint16m2_t test_vmadd_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t op
   return vmadd_vv_u16m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1544,7 +1390,6 @@ vuint16m2_t test_vmadd_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1,
   return vmadd_vx_u16m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1554,7 +1399,6 @@ vuint16m4_t test_vmadd_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t op
   return vmadd_vv_u16m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1564,7 +1408,6 @@ vuint16m4_t test_vmadd_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1,
   return vmadd_vx_u16m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1574,7 +1417,6 @@ vuint16m8_t test_vmadd_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t op
   return vmadd_vv_u16m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1584,7 +1426,6 @@ vuint16m8_t test_vmadd_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1,
   return vmadd_vx_u16m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1594,7 +1435,6 @@ vuint32mf2_t test_vmadd_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf2
   return vmadd_vv_u32mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1604,7 +1444,6 @@ vuint32mf2_t test_vmadd_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t o
   return vmadd_vx_u32mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1614,7 +1453,6 @@ vuint32m1_t test_vmadd_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t o
   return vmadd_vv_u32m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1624,7 +1462,6 @@ vuint32m1_t test_vmadd_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1,
   return vmadd_vx_u32m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1634,7 +1471,6 @@ vuint32m2_t test_vmadd_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t o
   return vmadd_vv_u32m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1644,7 +1480,6 @@ vuint32m2_t test_vmadd_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1,
   return vmadd_vx_u32m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1654,7 +1489,6 @@ vuint32m4_t test_vmadd_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t op
   return vmadd_vv_u32m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1664,7 +1498,6 @@ vuint32m4_t test_vmadd_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1,
   return vmadd_vx_u32m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1674,7 +1507,6 @@ vuint32m8_t test_vmadd_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t op
   return vmadd_vv_u32m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1684,7 +1516,6 @@ vuint32m8_t test_vmadd_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1,
   return vmadd_vx_u32m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1694,7 +1525,6 @@ vuint64m1_t test_vmadd_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t o
   return vmadd_vv_u64m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1704,7 +1534,6 @@ vuint64m1_t test_vmadd_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1,
   return vmadd_vx_u64m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1714,7 +1543,6 @@ vuint64m2_t test_vmadd_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t o
   return vmadd_vv_u64m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1724,7 +1552,6 @@ vuint64m2_t test_vmadd_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1,
   return vmadd_vx_u64m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1734,7 +1561,6 @@ vuint64m4_t test_vmadd_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t o
   return vmadd_vv_u64m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1744,7 +1570,6 @@ vuint64m4_t test_vmadd_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1,
   return vmadd_vx_u64m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1754,7 +1579,6 @@ vuint64m8_t test_vmadd_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t op
   return vmadd_vv_u64m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmand.c
index 248d1b2029e1f..d9d3caae762b8 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmand.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmand.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmand_mm_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmand.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vbool1_t test_vmand_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
   return vmand_mm_b1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmand_mm_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmand.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vbool2_t test_vmand_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
   return vmand_mm_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmand_mm_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmand.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vbool4_t test_vmand_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
   return vmand_mm_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmand_mm_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmand.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vbool8_t test_vmand_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
   return vmand_mm_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmand_mm_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vbool16_t test_vmand_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
   return vmand_mm_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmand_mm_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmand.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vbool32_t test_vmand_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
   return vmand_mm_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmand_mm_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vbool64_t test_vmand_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
   return vmand_mm_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmandnot_mm_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmandnot.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vbool1_t test_vmandnot_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
   return vmandnot_mm_b1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmandnot_mm_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmandnot.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vbool2_t test_vmandnot_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
   return vmandnot_mm_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmandnot_mm_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmandnot.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vbool4_t test_vmandnot_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
   return vmandnot_mm_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmandnot_mm_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmandnot.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vbool8_t test_vmandnot_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
   return vmandnot_mm_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmandnot_mm_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmandnot.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vbool16_t test_vmandnot_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
   return vmandnot_mm_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmandnot_mm_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmandnot.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vbool32_t test_vmandnot_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
   return vmandnot_mm_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmandnot_mm_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmandnot.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmax.c
index 4abbb5de622dd..903a3a4261d05 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmax.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmax.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint8mf8_t test_vmax_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
   return vmax_vv_i8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint8mf8_t test_vmax_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
   return vmax_vx_i8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint8mf4_t test_vmax_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
   return vmax_vv_i8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint8mf4_t test_vmax_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
   return vmax_vx_i8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint8mf2_t test_vmax_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
   return vmax_vv_i8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint8mf2_t test_vmax_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
   return vmax_vx_i8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vint8m1_t test_vmax_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
   return vmax_vv_i8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vint8m1_t test_vmax_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
   return vmax_vx_i8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vint8m2_t test_vmax_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
   return vmax_vv_i8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vint8m2_t test_vmax_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
   return vmax_vx_i8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vint8m4_t test_vmax_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
   return vmax_vv_i8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vint8m4_t test_vmax_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
   return vmax_vx_i8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vint8m8_t test_vmax_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
   return vmax_vv_i8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vint8m8_t test_vmax_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
   return vmax_vx_i8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vint16mf4_t test_vmax_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
   return vmax_vv_i16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vint16mf4_t test_vmax_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
   return vmax_vx_i16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vint16mf2_t test_vmax_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
   return vmax_vv_i16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vint16mf2_t test_vmax_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
   return vmax_vx_i16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vint16m1_t test_vmax_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
   return vmax_vv_i16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vint16m1_t test_vmax_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
   return vmax_vx_i16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vint16m2_t test_vmax_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
   return vmax_vv_i16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vint16m2_t test_vmax_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
   return vmax_vx_i16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vint16m4_t test_vmax_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
   return vmax_vv_i16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vint16m4_t test_vmax_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
   return vmax_vx_i16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vint16m8_t test_vmax_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
   return vmax_vv_i16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vint16m8_t test_vmax_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
   return vmax_vx_i16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vint32mf2_t test_vmax_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
   return vmax_vv_i32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vint32mf2_t test_vmax_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
   return vmax_vx_i32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vint32m1_t test_vmax_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
   return vmax_vv_i32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -304,7 +274,6 @@ vint32m1_t test_vmax_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
   return vmax_vx_i32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -314,7 +283,6 @@ vint32m2_t test_vmax_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
   return vmax_vv_i32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -324,7 +292,6 @@ vint32m2_t test_vmax_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
   return vmax_vx_i32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -334,7 +301,6 @@ vint32m4_t test_vmax_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
   return vmax_vv_i32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -344,7 +310,6 @@ vint32m4_t test_vmax_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
   return vmax_vx_i32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -354,7 +319,6 @@ vint32m8_t test_vmax_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
   return vmax_vv_i32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -364,7 +328,6 @@ vint32m8_t test_vmax_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
   return vmax_vx_i32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -374,7 +337,6 @@ vint64m1_t test_vmax_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
   return vmax_vv_i64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -384,7 +346,6 @@ vint64m1_t test_vmax_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
   return vmax_vx_i64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -394,7 +355,6 @@ vint64m2_t test_vmax_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
   return vmax_vv_i64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -404,7 +364,6 @@ vint64m2_t test_vmax_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
   return vmax_vx_i64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -414,7 +373,6 @@ vint64m4_t test_vmax_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
   return vmax_vv_i64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -424,7 +382,6 @@ vint64m4_t test_vmax_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
   return vmax_vx_i64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -434,7 +391,6 @@ vint64m8_t test_vmax_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
   return vmax_vv_i64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -444,7 +400,6 @@ vint64m8_t test_vmax_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
   return vmax_vx_i64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -454,7 +409,6 @@ vuint8mf8_t test_vmaxu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
   return vmaxu_vv_u8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -464,7 +418,6 @@ vuint8mf8_t test_vmaxu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
   return vmaxu_vx_u8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -474,7 +427,6 @@ vuint8mf4_t test_vmaxu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
   return vmaxu_vv_u8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -484,7 +436,6 @@ vuint8mf4_t test_vmaxu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
   return vmaxu_vx_u8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -494,7 +445,6 @@ vuint8mf2_t test_vmaxu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
   return vmaxu_vv_u8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -504,7 +454,6 @@ vuint8mf2_t test_vmaxu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
   return vmaxu_vx_u8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -514,7 +463,6 @@ vuint8m1_t test_vmaxu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vmaxu_vv_u8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -524,7 +472,6 @@ vuint8m1_t test_vmaxu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
   return vmaxu_vx_u8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -534,7 +481,6 @@ vuint8m2_t test_vmaxu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vmaxu_vv_u8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -544,7 +490,6 @@ vuint8m2_t test_vmaxu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
   return vmaxu_vx_u8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -554,7 +499,6 @@ vuint8m4_t test_vmaxu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vmaxu_vv_u8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -564,7 +508,6 @@ vuint8m4_t test_vmaxu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
   return vmaxu_vx_u8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -574,7 +517,6 @@ vuint8m8_t test_vmaxu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
   return vmaxu_vv_u8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -584,7 +526,6 @@ vuint8m8_t test_vmaxu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
   return vmaxu_vx_u8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -594,7 +535,6 @@ vuint16mf4_t test_vmaxu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
   return vmaxu_vv_u16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -604,7 +544,6 @@ vuint16mf4_t test_vmaxu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
   return vmaxu_vx_u16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -614,7 +553,6 @@ vuint16mf2_t test_vmaxu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
   return vmaxu_vv_u16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -624,7 +562,6 @@ vuint16mf2_t test_vmaxu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
   return vmaxu_vx_u16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -634,7 +571,6 @@ vuint16m1_t test_vmaxu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
   return vmaxu_vv_u16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -644,7 +580,6 @@ vuint16m1_t test_vmaxu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
   return vmaxu_vx_u16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -654,7 +589,6 @@ vuint16m2_t test_vmaxu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vmaxu_vv_u16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -664,7 +598,6 @@ vuint16m2_t test_vmaxu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
   return vmaxu_vx_u16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -674,7 +607,6 @@ vuint16m4_t test_vmaxu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vmaxu_vv_u16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -684,7 +616,6 @@ vuint16m4_t test_vmaxu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
   return vmaxu_vx_u16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -694,7 +625,6 @@ vuint16m8_t test_vmaxu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return vmaxu_vv_u16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -704,7 +634,6 @@ vuint16m8_t test_vmaxu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
   return vmaxu_vx_u16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -714,7 +643,6 @@ vuint32mf2_t test_vmaxu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
   return vmaxu_vv_u32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -724,7 +652,6 @@ vuint32mf2_t test_vmaxu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
   return vmaxu_vx_u32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -734,7 +661,6 @@ vuint32m1_t test_vmaxu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
   return vmaxu_vv_u32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -744,7 +670,6 @@ vuint32m1_t test_vmaxu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
   return vmaxu_vx_u32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -754,7 +679,6 @@ vuint32m2_t test_vmaxu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
   return vmaxu_vv_u32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -764,7 +688,6 @@ vuint32m2_t test_vmaxu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vmaxu_vx_u32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -774,7 +697,6 @@ vuint32m4_t test_vmaxu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vmaxu_vv_u32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -784,7 +706,6 @@ vuint32m4_t test_vmaxu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vmaxu_vx_u32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -794,7 +715,6 @@ vuint32m8_t test_vmaxu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
   return vmaxu_vv_u32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -804,7 +724,6 @@ vuint32m8_t test_vmaxu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
   return vmaxu_vx_u32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -814,7 +733,6 @@ vuint64m1_t test_vmaxu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
   return vmaxu_vv_u64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -824,7 +742,6 @@ vuint64m1_t test_vmaxu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
   return vmaxu_vx_u64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -834,7 +751,6 @@ vuint64m2_t test_vmaxu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
   return vmaxu_vv_u64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -844,7 +760,6 @@ vuint64m2_t test_vmaxu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
   return vmaxu_vx_u64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -854,7 +769,6 @@ vuint64m4_t test_vmaxu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
   return vmaxu_vv_u64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -864,7 +778,6 @@ vuint64m4_t test_vmaxu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
   return vmaxu_vx_u64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -874,7 +787,6 @@ vuint64m8_t test_vmaxu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
   return vmaxu_vv_u64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -884,7 +796,6 @@ vuint64m8_t test_vmaxu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
   return vmaxu_vx_u64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -894,7 +805,6 @@ vint8mf8_t test_vmax_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t
   return vmax_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -904,7 +814,6 @@ vint8mf8_t test_vmax_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t
   return vmax_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmax.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -914,7 +823,6 @@ vint8mf4_t test_vmax_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t
   return vmax_vv_i8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmax.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -924,7 +832,6 @@ vint8mf4_t test_vmax_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t
   return vmax_vx_i8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmax.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -934,7 +841,6 @@ vint8mf2_t test_vmax_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t
   return vmax_vv_i8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmax.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -944,7 +850,6 @@ vint8mf2_t test_vmax_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t
   return vmax_vx_i8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmax.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -954,7 +859,6 @@ vint8m1_t test_vmax_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1,
   return vmax_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmax.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -964,7 +868,6 @@ vint8m1_t test_vmax_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1,
   return vmax_vx_i8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmax.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -974,7 +877,6 @@ vint8m2_t test_vmax_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1,
   return vmax_vv_i8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmax.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -984,7 +886,6 @@ vint8m2_t test_vmax_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1,
   return vmax_vx_i8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmax.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -994,7 +895,6 @@ vint8m4_t test_vmax_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1,
   return vmax_vv_i8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmax.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1004,7 +904,6 @@ vint8m4_t test_vmax_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1,
   return vmax_vx_i8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmax.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1014,7 +913,6 @@ vint8m8_t test_vmax_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1,
   return vmax_vv_i8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmax.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1024,7 +922,6 @@ vint8m8_t test_vmax_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1,
   return vmax_vx_i8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmax.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1034,7 +931,6 @@ vint16mf4_t test_vmax_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16m
   return vmax_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmax.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1044,7 +940,6 @@ vint16mf4_t test_vmax_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16m
   return vmax_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmax.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1054,7 +949,6 @@ vint16mf2_t test_vmax_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16m
   return vmax_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmax.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1064,7 +958,6 @@ vint16mf2_t test_vmax_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16m
   return vmax_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmax.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1074,7 +967,6 @@ vint16m1_t test_vmax_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t
   return vmax_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmax.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1084,7 +976,6 @@ vint16m1_t test_vmax_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t
   return vmax_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmax.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1094,7 +985,6 @@ vint16m2_t test_vmax_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
   return vmax_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmax.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1104,7 +994,6 @@ vint16m2_t test_vmax_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
   return vmax_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmax.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1114,7 +1003,6 @@ vint16m4_t test_vmax_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
   return vmax_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmax.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1124,7 +1012,6 @@ vint16m4_t test_vmax_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
   return vmax_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmax.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1134,7 +1021,6 @@ vint16m8_t test_vmax_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
   return vmax_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmax.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1144,7 +1030,6 @@ vint16m8_t test_vmax_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
   return vmax_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1154,7 +1039,6 @@ vint32mf2_t test_vmax_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32m
   return vmax_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1164,7 +1048,6 @@ vint32mf2_t test_vmax_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32m
   return vmax_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmax.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1174,7 +1057,6 @@ vint32m1_t test_vmax_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t
   return vmax_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmax.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1184,7 +1066,6 @@ vint32m1_t test_vmax_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t
   return vmax_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmax.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1194,7 +1075,6 @@ vint32m2_t test_vmax_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t
   return vmax_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmax.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1204,7 +1084,6 @@ vint32m2_t test_vmax_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t
   return vmax_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmax.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1214,7 +1093,6 @@ vint32m4_t test_vmax_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
   return vmax_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmax.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1224,7 +1102,6 @@ vint32m4_t test_vmax_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
   return vmax_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmax.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1234,7 +1111,6 @@ vint32m8_t test_vmax_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
   return vmax_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmax.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1244,7 +1120,6 @@ vint32m8_t test_vmax_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
   return vmax_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1254,7 +1129,6 @@ vint64m1_t test_vmax_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t
   return vmax_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1264,7 +1138,6 @@ vint64m1_t test_vmax_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t
   return vmax_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmax.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1274,7 +1147,6 @@ vint64m2_t test_vmax_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t
   return vmax_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmax.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1284,7 +1156,6 @@ vint64m2_t test_vmax_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t
   return vmax_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmax.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1294,7 +1165,6 @@ vint64m4_t test_vmax_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t
   return vmax_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmax.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1304,7 +1174,6 @@ vint64m4_t test_vmax_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t
   return vmax_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmax.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1314,7 +1183,6 @@ vint64m8_t test_vmax_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
   return vmax_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmax.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1324,7 +1192,6 @@ vint64m8_t test_vmax_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
   return vmax_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmaxu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1334,7 +1201,6 @@ vuint8mf8_t test_vmaxu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8m
   return vmaxu_vv_u8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmaxu.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1344,7 +1210,6 @@ vuint8mf8_t test_vmaxu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8m
   return vmaxu_vx_u8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmaxu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1354,7 +1219,6 @@ vuint8mf4_t test_vmaxu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8m
   return vmaxu_vv_u8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmaxu.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1364,7 +1228,6 @@ vuint8mf4_t test_vmaxu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8m
   return vmaxu_vx_u8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmaxu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1374,7 +1237,6 @@ vuint8mf2_t test_vmaxu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8m
   return vmaxu_vv_u8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmaxu.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1384,7 +1246,6 @@ vuint8mf2_t test_vmaxu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8m
   return vmaxu_vx_u8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmaxu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1394,7 +1255,6 @@ vuint8m1_t test_vmaxu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
   return vmaxu_vv_u8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmaxu.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1404,7 +1264,6 @@ vuint8m1_t test_vmaxu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
   return vmaxu_vx_u8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmaxu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1414,7 +1273,6 @@ vuint8m2_t test_vmaxu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
   return vmaxu_vv_u8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmaxu.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1424,7 +1282,6 @@ vuint8m2_t test_vmaxu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
   return vmaxu_vx_u8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmaxu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1434,7 +1291,6 @@ vuint8m4_t test_vmaxu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
   return vmaxu_vv_u8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmaxu.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1444,7 +1300,6 @@ vuint8m4_t test_vmaxu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
   return vmaxu_vx_u8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmaxu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1454,7 +1309,6 @@ vuint8m8_t test_vmaxu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
   return vmaxu_vv_u8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmaxu.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1464,7 +1318,6 @@ vuint8m8_t test_vmaxu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
   return vmaxu_vx_u8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmaxu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1474,7 +1327,6 @@ vuint16mf4_t test_vmaxu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuin
   return vmaxu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmaxu.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1484,7 +1336,6 @@ vuint16mf4_t test_vmaxu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuin
   return vmaxu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmaxu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1494,7 +1345,6 @@ vuint16mf2_t test_vmaxu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuin
   return vmaxu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmaxu.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1504,7 +1354,6 @@ vuint16mf2_t test_vmaxu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuin
   return vmaxu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmaxu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1514,7 +1363,6 @@ vuint16m1_t test_vmaxu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16
   return vmaxu_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmaxu.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1524,7 +1372,6 @@ vuint16m1_t test_vmaxu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16
   return vmaxu_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmaxu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1534,7 +1381,6 @@ vuint16m2_t test_vmaxu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m
   return vmaxu_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmaxu.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1544,7 +1390,6 @@ vuint16m2_t test_vmaxu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m
   return vmaxu_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmaxu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1554,7 +1399,6 @@ vuint16m4_t test_vmaxu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m
   return vmaxu_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmaxu.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1564,7 +1408,6 @@ vuint16m4_t test_vmaxu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m
   return vmaxu_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmaxu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1574,7 +1417,6 @@ vuint16m8_t test_vmaxu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m
   return vmaxu_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmaxu.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1584,7 +1426,6 @@ vuint16m8_t test_vmaxu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m
   return vmaxu_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1594,7 +1435,6 @@ vuint32mf2_t test_vmaxu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuin
   return vmaxu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1604,7 +1444,6 @@ vuint32mf2_t test_vmaxu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuin
   return vmaxu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmaxu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1614,7 +1453,6 @@ vuint32m1_t test_vmaxu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32
   return vmaxu_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmaxu.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1624,7 +1462,6 @@ vuint32m1_t test_vmaxu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32
   return vmaxu_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmaxu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1634,7 +1471,6 @@ vuint32m2_t test_vmaxu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32
   return vmaxu_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmaxu.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1644,7 +1480,6 @@ vuint32m2_t test_vmaxu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32
   return vmaxu_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmaxu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1654,7 +1489,6 @@ vuint32m4_t test_vmaxu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m
   return vmaxu_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmaxu.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1664,7 +1498,6 @@ vuint32m4_t test_vmaxu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m
   return vmaxu_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmaxu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1674,7 +1507,6 @@ vuint32m8_t test_vmaxu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m
   return vmaxu_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmaxu.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1684,7 +1516,6 @@ vuint32m8_t test_vmaxu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m
   return vmaxu_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1694,7 +1525,6 @@ vuint64m1_t test_vmaxu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64
   return vmaxu_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmaxu.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1704,7 +1534,6 @@ vuint64m1_t test_vmaxu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64
   return vmaxu_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmaxu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1714,7 +1543,6 @@ vuint64m2_t test_vmaxu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64
   return vmaxu_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmaxu.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1724,7 +1552,6 @@ vuint64m2_t test_vmaxu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64
   return vmaxu_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmaxu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1734,7 +1561,6 @@ vuint64m4_t test_vmaxu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64
   return vmaxu_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmaxu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1744,7 +1570,6 @@ vuint64m4_t test_vmaxu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64
   return vmaxu_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmaxu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1754,7 +1579,6 @@ vuint64m8_t test_vmaxu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m
   return vmaxu_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmaxu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmclr.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmclr.c
index 435992e7fbe1f..885c7cebdc76d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmclr.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmclr.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmclr_m_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmclr.nxv64i1.i64(i64 [[VL:%.*]])
@@ -12,7 +11,6 @@
 //
 vbool1_t test_vmclr_m_b1(size_t vl) { return vmclr_m_b1(vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vmclr_m_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmclr.nxv32i1.i64(i64 [[VL:%.*]])
@@ -20,7 +18,6 @@ vbool1_t test_vmclr_m_b1(size_t vl) { return vmclr_m_b1(vl); }
 //
 vbool2_t test_vmclr_m_b2(size_t vl) { return vmclr_m_b2(vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vmclr_m_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmclr.nxv16i1.i64(i64 [[VL:%.*]])
@@ -28,7 +25,6 @@ vbool2_t test_vmclr_m_b2(size_t vl) { return vmclr_m_b2(vl); }
 //
 vbool4_t test_vmclr_m_b4(size_t vl) { return vmclr_m_b4(vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vmclr_m_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmclr.nxv8i1.i64(i64 [[VL:%.*]])
@@ -36,7 +32,6 @@ vbool4_t test_vmclr_m_b4(size_t vl) { return vmclr_m_b4(vl); }
 //
 vbool8_t test_vmclr_m_b8(size_t vl) { return vmclr_m_b8(vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vmclr_m_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmclr.nxv4i1.i64(i64 [[VL:%.*]])
@@ -44,7 +39,6 @@ vbool8_t test_vmclr_m_b8(size_t vl) { return vmclr_m_b8(vl); }
 //
 vbool16_t test_vmclr_m_b16(size_t vl) { return vmclr_m_b16(vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vmclr_m_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmclr.nxv2i1.i64(i64 [[VL:%.*]])
@@ -52,7 +46,6 @@ vbool16_t test_vmclr_m_b16(size_t vl) { return vmclr_m_b16(vl); }
 //
 vbool32_t test_vmclr_m_b32(size_t vl) { return vmclr_m_b32(vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vmclr_m_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmclr.nxv1i1.i64(i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmerge.c
index f4170f7265d6b..e9675dbcece67 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmerge.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmerge.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vint8mf8_t test_vmerge_vvm_i8mf8(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2,
   return vmerge_vvm_i8mf8(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -27,7 +25,6 @@ vint8mf8_t test_vmerge_vxm_i8mf8(vbool64_t mask, vint8mf8_t op1, int8_t op2,
   return vmerge_vxm_i8mf8(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -38,7 +35,6 @@ vint8mf4_t test_vmerge_vvm_i8mf4(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2,
   return vmerge_vvm_i8mf4(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -49,7 +45,6 @@ vint8mf4_t test_vmerge_vxm_i8mf4(vbool32_t mask, vint8mf4_t op1, int8_t op2,
   return vmerge_vxm_i8mf4(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -60,7 +55,6 @@ vint8mf2_t test_vmerge_vvm_i8mf2(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2,
   return vmerge_vvm_i8mf2(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -71,7 +65,6 @@ vint8mf2_t test_vmerge_vxm_i8mf2(vbool16_t mask, vint8mf2_t op1, int8_t op2,
   return vmerge_vxm_i8mf2(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -82,7 +75,6 @@ vint8m1_t test_vmerge_vvm_i8m1(vbool8_t mask, vint8m1_t op1, vint8m1_t op2,
   return vmerge_vvm_i8m1(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -93,7 +85,6 @@ vint8m1_t test_vmerge_vxm_i8m1(vbool8_t mask, vint8m1_t op1, int8_t op2,
   return vmerge_vxm_i8m1(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -104,7 +95,6 @@ vint8m2_t test_vmerge_vvm_i8m2(vbool4_t mask, vint8m2_t op1, vint8m2_t op2,
   return vmerge_vvm_i8m2(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -115,7 +105,6 @@ vint8m2_t test_vmerge_vxm_i8m2(vbool4_t mask, vint8m2_t op1, int8_t op2,
   return vmerge_vxm_i8m2(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -126,7 +115,6 @@ vint8m4_t test_vmerge_vvm_i8m4(vbool2_t mask, vint8m4_t op1, vint8m4_t op2,
   return vmerge_vvm_i8m4(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -137,7 +125,6 @@ vint8m4_t test_vmerge_vxm_i8m4(vbool2_t mask, vint8m4_t op1, int8_t op2,
   return vmerge_vxm_i8m4(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -148,7 +135,6 @@ vint8m8_t test_vmerge_vvm_i8m8(vbool1_t mask, vint8m8_t op1, vint8m8_t op2,
   return vmerge_vvm_i8m8(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -159,7 +145,6 @@ vint8m8_t test_vmerge_vxm_i8m8(vbool1_t mask, vint8m8_t op1, int8_t op2,
   return vmerge_vxm_i8m8(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -170,7 +155,6 @@ vint16mf4_t test_vmerge_vvm_i16mf4(vbool64_t mask, vint16mf4_t op1,
   return vmerge_vvm_i16mf4(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -181,7 +165,6 @@ vint16mf4_t test_vmerge_vxm_i16mf4(vbool64_t mask, vint16mf4_t op1, int16_t op2,
   return vmerge_vxm_i16mf4(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -192,7 +175,6 @@ vint16mf2_t test_vmerge_vvm_i16mf2(vbool32_t mask, vint16mf2_t op1,
   return vmerge_vvm_i16mf2(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -203,7 +185,6 @@ vint16mf2_t test_vmerge_vxm_i16mf2(vbool32_t mask, vint16mf2_t op1, int16_t op2,
   return vmerge_vxm_i16mf2(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -214,7 +195,6 @@ vint16m1_t test_vmerge_vvm_i16m1(vbool16_t mask, vint16m1_t op1, vint16m1_t op2,
   return vmerge_vvm_i16m1(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -225,7 +205,6 @@ vint16m1_t test_vmerge_vxm_i16m1(vbool16_t mask, vint16m1_t op1, int16_t op2,
   return vmerge_vxm_i16m1(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -236,7 +215,6 @@ vint16m2_t test_vmerge_vvm_i16m2(vbool8_t mask, vint16m2_t op1, vint16m2_t op2,
   return vmerge_vvm_i16m2(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -247,7 +225,6 @@ vint16m2_t test_vmerge_vxm_i16m2(vbool8_t mask, vint16m2_t op1, int16_t op2,
   return vmerge_vxm_i16m2(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -258,7 +235,6 @@ vint16m4_t test_vmerge_vvm_i16m4(vbool4_t mask, vint16m4_t op1, vint16m4_t op2,
   return vmerge_vvm_i16m4(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -269,7 +245,6 @@ vint16m4_t test_vmerge_vxm_i16m4(vbool4_t mask, vint16m4_t op1, int16_t op2,
   return vmerge_vxm_i16m4(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -280,7 +255,6 @@ vint16m8_t test_vmerge_vvm_i16m8(vbool2_t mask, vint16m8_t op1, vint16m8_t op2,
   return vmerge_vvm_i16m8(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -291,7 +265,6 @@ vint16m8_t test_vmerge_vxm_i16m8(vbool2_t mask, vint16m8_t op1, int16_t op2,
   return vmerge_vxm_i16m8(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -302,7 +275,6 @@ vint32mf2_t test_vmerge_vvm_i32mf2(vbool64_t mask, vint32mf2_t op1,
   return vmerge_vvm_i32mf2(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -313,7 +285,6 @@ vint32mf2_t test_vmerge_vxm_i32mf2(vbool64_t mask, vint32mf2_t op1, int32_t op2,
   return vmerge_vxm_i32mf2(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -324,7 +295,6 @@ vint32m1_t test_vmerge_vvm_i32m1(vbool32_t mask, vint32m1_t op1, vint32m1_t op2,
   return vmerge_vvm_i32m1(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -335,7 +305,6 @@ vint32m1_t test_vmerge_vxm_i32m1(vbool32_t mask, vint32m1_t op1, int32_t op2,
   return vmerge_vxm_i32m1(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -346,7 +315,6 @@ vint32m2_t test_vmerge_vvm_i32m2(vbool16_t mask, vint32m2_t op1, vint32m2_t op2,
   return vmerge_vvm_i32m2(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -357,7 +325,6 @@ vint32m2_t test_vmerge_vxm_i32m2(vbool16_t mask, vint32m2_t op1, int32_t op2,
   return vmerge_vxm_i32m2(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -368,7 +335,6 @@ vint32m4_t test_vmerge_vvm_i32m4(vbool8_t mask, vint32m4_t op1, vint32m4_t op2,
   return vmerge_vvm_i32m4(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -379,7 +345,6 @@ vint32m4_t test_vmerge_vxm_i32m4(vbool8_t mask, vint32m4_t op1, int32_t op2,
   return vmerge_vxm_i32m4(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -390,7 +355,6 @@ vint32m8_t test_vmerge_vvm_i32m8(vbool4_t mask, vint32m8_t op1, vint32m8_t op2,
   return vmerge_vvm_i32m8(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -401,7 +365,6 @@ vint32m8_t test_vmerge_vxm_i32m8(vbool4_t mask, vint32m8_t op1, int32_t op2,
   return vmerge_vxm_i32m8(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -412,7 +375,6 @@ vint64m1_t test_vmerge_vvm_i64m1(vbool64_t mask, vint64m1_t op1, vint64m1_t op2,
   return vmerge_vvm_i64m1(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -423,7 +385,6 @@ vint64m1_t test_vmerge_vxm_i64m1(vbool64_t mask, vint64m1_t op1, int64_t op2,
   return vmerge_vxm_i64m1(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -434,7 +395,6 @@ vint64m2_t test_vmerge_vvm_i64m2(vbool32_t mask, vint64m2_t op1, vint64m2_t op2,
   return vmerge_vvm_i64m2(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -445,7 +405,6 @@ vint64m2_t test_vmerge_vxm_i64m2(vbool32_t mask, vint64m2_t op1, int64_t op2,
   return vmerge_vxm_i64m2(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -456,7 +415,6 @@ vint64m4_t test_vmerge_vvm_i64m4(vbool16_t mask, vint64m4_t op1, vint64m4_t op2,
   return vmerge_vvm_i64m4(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -467,7 +425,6 @@ vint64m4_t test_vmerge_vxm_i64m4(vbool16_t mask, vint64m4_t op1, int64_t op2,
   return vmerge_vxm_i64m4(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -478,7 +435,6 @@ vint64m8_t test_vmerge_vvm_i64m8(vbool8_t mask, vint64m8_t op1, vint64m8_t op2,
   return vmerge_vvm_i64m8(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -489,7 +445,6 @@ vint64m8_t test_vmerge_vxm_i64m8(vbool8_t mask, vint64m8_t op1, int64_t op2,
   return vmerge_vxm_i64m8(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -500,7 +455,6 @@ vuint8mf8_t test_vmerge_vvm_u8mf8(vbool64_t mask, vuint8mf8_t op1,
   return vmerge_vvm_u8mf8(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -511,7 +465,6 @@ vuint8mf8_t test_vmerge_vxm_u8mf8(vbool64_t mask, vuint8mf8_t op1, uint8_t op2,
   return vmerge_vxm_u8mf8(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -522,7 +475,6 @@ vuint8mf4_t test_vmerge_vvm_u8mf4(vbool32_t mask, vuint8mf4_t op1,
   return vmerge_vvm_u8mf4(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -533,7 +485,6 @@ vuint8mf4_t test_vmerge_vxm_u8mf4(vbool32_t mask, vuint8mf4_t op1, uint8_t op2,
   return vmerge_vxm_u8mf4(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -544,7 +495,6 @@ vuint8mf2_t test_vmerge_vvm_u8mf2(vbool16_t mask, vuint8mf2_t op1,
   return vmerge_vvm_u8mf2(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -555,7 +505,6 @@ vuint8mf2_t test_vmerge_vxm_u8mf2(vbool16_t mask, vuint8mf2_t op1, uint8_t op2,
   return vmerge_vxm_u8mf2(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -566,7 +515,6 @@ vuint8m1_t test_vmerge_vvm_u8m1(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2,
   return vmerge_vvm_u8m1(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -577,7 +525,6 @@ vuint8m1_t test_vmerge_vxm_u8m1(vbool8_t mask, vuint8m1_t op1, uint8_t op2,
   return vmerge_vxm_u8m1(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -588,7 +535,6 @@ vuint8m2_t test_vmerge_vvm_u8m2(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2,
   return vmerge_vvm_u8m2(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -599,7 +545,6 @@ vuint8m2_t test_vmerge_vxm_u8m2(vbool4_t mask, vuint8m2_t op1, uint8_t op2,
   return vmerge_vxm_u8m2(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -610,7 +555,6 @@ vuint8m4_t test_vmerge_vvm_u8m4(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2,
   return vmerge_vvm_u8m4(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -621,7 +565,6 @@ vuint8m4_t test_vmerge_vxm_u8m4(vbool2_t mask, vuint8m4_t op1, uint8_t op2,
   return vmerge_vxm_u8m4(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -632,7 +575,6 @@ vuint8m8_t test_vmerge_vvm_u8m8(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2,
   return vmerge_vvm_u8m8(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -643,7 +585,6 @@ vuint8m8_t test_vmerge_vxm_u8m8(vbool1_t mask, vuint8m8_t op1, uint8_t op2,
   return vmerge_vxm_u8m8(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -654,7 +595,6 @@ vuint16mf4_t test_vmerge_vvm_u16mf4(vbool64_t mask, vuint16mf4_t op1,
   return vmerge_vvm_u16mf4(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -665,7 +605,6 @@ vuint16mf4_t test_vmerge_vxm_u16mf4(vbool64_t mask, vuint16mf4_t op1,
   return vmerge_vxm_u16mf4(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -676,7 +615,6 @@ vuint16mf2_t test_vmerge_vvm_u16mf2(vbool32_t mask, vuint16mf2_t op1,
   return vmerge_vvm_u16mf2(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -687,7 +625,6 @@ vuint16mf2_t test_vmerge_vxm_u16mf2(vbool32_t mask, vuint16mf2_t op1,
   return vmerge_vxm_u16mf2(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -698,7 +635,6 @@ vuint16m1_t test_vmerge_vvm_u16m1(vbool16_t mask, vuint16m1_t op1,
   return vmerge_vvm_u16m1(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -709,7 +645,6 @@ vuint16m1_t test_vmerge_vxm_u16m1(vbool16_t mask, vuint16m1_t op1, uint16_t op2,
   return vmerge_vxm_u16m1(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -720,7 +655,6 @@ vuint16m2_t test_vmerge_vvm_u16m2(vbool8_t mask, vuint16m2_t op1,
   return vmerge_vvm_u16m2(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -731,7 +665,6 @@ vuint16m2_t test_vmerge_vxm_u16m2(vbool8_t mask, vuint16m2_t op1, uint16_t op2,
   return vmerge_vxm_u16m2(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -742,7 +675,6 @@ vuint16m4_t test_vmerge_vvm_u16m4(vbool4_t mask, vuint16m4_t op1,
   return vmerge_vvm_u16m4(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -753,7 +685,6 @@ vuint16m4_t test_vmerge_vxm_u16m4(vbool4_t mask, vuint16m4_t op1, uint16_t op2,
   return vmerge_vxm_u16m4(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -764,7 +695,6 @@ vuint16m8_t test_vmerge_vvm_u16m8(vbool2_t mask, vuint16m8_t op1,
   return vmerge_vvm_u16m8(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -775,7 +705,6 @@ vuint16m8_t test_vmerge_vxm_u16m8(vbool2_t mask, vuint16m8_t op1, uint16_t op2,
   return vmerge_vxm_u16m8(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -786,7 +715,6 @@ vuint32mf2_t test_vmerge_vvm_u32mf2(vbool64_t mask, vuint32mf2_t op1,
   return vmerge_vvm_u32mf2(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -797,7 +725,6 @@ vuint32mf2_t test_vmerge_vxm_u32mf2(vbool64_t mask, vuint32mf2_t op1,
   return vmerge_vxm_u32mf2(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -808,7 +735,6 @@ vuint32m1_t test_vmerge_vvm_u32m1(vbool32_t mask, vuint32m1_t op1,
   return vmerge_vvm_u32m1(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -819,7 +745,6 @@ vuint32m1_t test_vmerge_vxm_u32m1(vbool32_t mask, vuint32m1_t op1, uint32_t op2,
   return vmerge_vxm_u32m1(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -830,7 +755,6 @@ vuint32m2_t test_vmerge_vvm_u32m2(vbool16_t mask, vuint32m2_t op1,
   return vmerge_vvm_u32m2(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -841,7 +765,6 @@ vuint32m2_t test_vmerge_vxm_u32m2(vbool16_t mask, vuint32m2_t op1, uint32_t op2,
   return vmerge_vxm_u32m2(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -852,7 +775,6 @@ vuint32m4_t test_vmerge_vvm_u32m4(vbool8_t mask, vuint32m4_t op1,
   return vmerge_vvm_u32m4(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -863,7 +785,6 @@ vuint32m4_t test_vmerge_vxm_u32m4(vbool8_t mask, vuint32m4_t op1, uint32_t op2,
   return vmerge_vxm_u32m4(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -874,7 +795,6 @@ vuint32m8_t test_vmerge_vvm_u32m8(vbool4_t mask, vuint32m8_t op1,
   return vmerge_vvm_u32m8(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -885,7 +805,6 @@ vuint32m8_t test_vmerge_vxm_u32m8(vbool4_t mask, vuint32m8_t op1, uint32_t op2,
   return vmerge_vxm_u32m8(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -896,7 +815,6 @@ vuint64m1_t test_vmerge_vvm_u64m1(vbool64_t mask, vuint64m1_t op1,
   return vmerge_vvm_u64m1(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -907,7 +825,6 @@ vuint64m1_t test_vmerge_vxm_u64m1(vbool64_t mask, vuint64m1_t op1, uint64_t op2,
   return vmerge_vxm_u64m1(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -918,7 +835,6 @@ vuint64m2_t test_vmerge_vvm_u64m2(vbool32_t mask, vuint64m2_t op1,
   return vmerge_vvm_u64m2(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -929,7 +845,6 @@ vuint64m2_t test_vmerge_vxm_u64m2(vbool32_t mask, vuint64m2_t op1, uint64_t op2,
   return vmerge_vxm_u64m2(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -940,7 +855,6 @@ vuint64m4_t test_vmerge_vvm_u64m4(vbool16_t mask, vuint64m4_t op1,
   return vmerge_vvm_u64m4(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -951,7 +865,6 @@ vuint64m4_t test_vmerge_vxm_u64m4(vbool16_t mask, vuint64m4_t op1, uint64_t op2,
   return vmerge_vxm_u64m4(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -962,7 +875,6 @@ vuint64m8_t test_vmerge_vvm_u64m8(vbool8_t mask, vuint64m8_t op1,
   return vmerge_vvm_u64m8(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -973,7 +885,6 @@ vuint64m8_t test_vmerge_vxm_u64m8(vbool8_t mask, vuint64m8_t op1, uint64_t op2,
   return vmerge_vxm_u64m8(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmerge.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -984,7 +895,6 @@ vfloat32mf2_t test_vmerge_vvm_f32mf2(vbool64_t mask, vfloat32mf2_t op1,
   return vmerge_vvm_f32mf2(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmerge.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -995,7 +905,6 @@ vfloat32m1_t test_vmerge_vvm_f32m1(vbool32_t mask, vfloat32m1_t op1,
   return vmerge_vvm_f32m1(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmerge.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1006,7 +915,6 @@ vfloat32m2_t test_vmerge_vvm_f32m2(vbool16_t mask, vfloat32m2_t op1,
   return vmerge_vvm_f32m2(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmerge.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1017,7 +925,6 @@ vfloat32m4_t test_vmerge_vvm_f32m4(vbool8_t mask, vfloat32m4_t op1,
   return vmerge_vvm_f32m4(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmerge.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1028,7 +935,6 @@ vfloat32m8_t test_vmerge_vvm_f32m8(vbool4_t mask, vfloat32m8_t op1,
   return vmerge_vvm_f32m8(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmerge.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1039,7 +945,6 @@ vfloat64m1_t test_vmerge_vvm_f64m1(vbool64_t mask, vfloat64m1_t op1,
   return vmerge_vvm_f64m1(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmerge.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1050,7 +955,6 @@ vfloat64m2_t test_vmerge_vvm_f64m2(vbool32_t mask, vfloat64m2_t op1,
   return vmerge_vvm_f64m2(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmerge.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1061,7 +965,6 @@ vfloat64m4_t test_vmerge_vvm_f64m4(vbool16_t mask, vfloat64m4_t op1,
   return vmerge_vvm_f64m4(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmerge.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfeq.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfeq.c
index 21ffa8dee6f27..b40e35b8a12d1 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfeq.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfeq.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vv_f32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vbool64_t test_vmfeq_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2,
   return vmfeq_vv_f32mf2_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vf_f32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -26,7 +24,6 @@ vbool64_t test_vmfeq_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) {
   return vmfeq_vf_f32mf2_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vv_f32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -37,7 +34,6 @@ vbool32_t test_vmfeq_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2,
   return vmfeq_vv_f32m1_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vf_f32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -47,7 +43,6 @@ vbool32_t test_vmfeq_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) {
   return vmfeq_vf_f32m1_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vv_f32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -58,7 +53,6 @@ vbool16_t test_vmfeq_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2,
   return vmfeq_vv_f32m2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vf_f32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -68,7 +62,6 @@ vbool16_t test_vmfeq_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) {
   return vmfeq_vf_f32m2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vv_f32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -78,7 +71,6 @@ vbool8_t test_vmfeq_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
   return vmfeq_vv_f32m4_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vf_f32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -88,7 +80,6 @@ vbool8_t test_vmfeq_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) {
   return vmfeq_vf_f32m4_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vv_f32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -98,7 +89,6 @@ vbool4_t test_vmfeq_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
   return vmfeq_vv_f32m8_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vf_f32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -108,7 +98,6 @@ vbool4_t test_vmfeq_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) {
   return vmfeq_vf_f32m8_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vv_f64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -119,7 +108,6 @@ vbool64_t test_vmfeq_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2,
   return vmfeq_vv_f64m1_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vf_f64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -129,7 +117,6 @@ vbool64_t test_vmfeq_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) {
   return vmfeq_vf_f64m1_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vv_f64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -140,7 +127,6 @@ vbool32_t test_vmfeq_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2,
   return vmfeq_vv_f64m2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vf_f64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -150,7 +136,6 @@ vbool32_t test_vmfeq_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) {
   return vmfeq_vf_f64m2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vv_f64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -161,7 +146,6 @@ vbool16_t test_vmfeq_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2,
   return vmfeq_vv_f64m4_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vf_f64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -171,7 +155,6 @@ vbool16_t test_vmfeq_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) {
   return vmfeq_vf_f64m4_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vv_f64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -181,7 +164,6 @@ vbool8_t test_vmfeq_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
   return vmfeq_vv_f64m8_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vf_f64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -191,7 +173,6 @@ vbool8_t test_vmfeq_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) {
   return vmfeq_vf_f64m8_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vv_f32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -203,7 +184,6 @@ vbool64_t test_vmfeq_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmfeq_vv_f32mf2_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vf_f32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32.f32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -214,7 +194,6 @@ vbool64_t test_vmfeq_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmfeq_vf_f32mf2_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vv_f32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -226,7 +205,6 @@ vbool32_t test_vmfeq_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmfeq_vv_f32m1_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vf_f32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f32.f32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -237,7 +215,6 @@ vbool32_t test_vmfeq_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmfeq_vf_f32m1_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vv_f32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -249,7 +226,6 @@ vbool16_t test_vmfeq_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmfeq_vv_f32m2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vf_f32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f32.f32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -260,7 +236,6 @@ vbool16_t test_vmfeq_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmfeq_vf_f32m2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vv_f32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -272,7 +247,6 @@ vbool8_t test_vmfeq_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmfeq_vv_f32m4_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vf_f32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f32.f32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -283,7 +257,6 @@ vbool8_t test_vmfeq_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmfeq_vf_f32m4_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vv_f32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -295,7 +268,6 @@ vbool4_t test_vmfeq_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmfeq_vv_f32m8_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vf_f32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16f32.f32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -306,7 +278,6 @@ vbool4_t test_vmfeq_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmfeq_vf_f32m8_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vv_f64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -318,7 +289,6 @@ vbool64_t test_vmfeq_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmfeq_vv_f64m1_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vf_f64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f64.f64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -329,7 +299,6 @@ vbool64_t test_vmfeq_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmfeq_vf_f64m1_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vv_f64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -341,7 +310,6 @@ vbool32_t test_vmfeq_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmfeq_vv_f64m2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vf_f64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f64.f64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -352,7 +320,6 @@ vbool32_t test_vmfeq_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmfeq_vf_f64m2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vv_f64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -364,7 +331,6 @@ vbool16_t test_vmfeq_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmfeq_vv_f64m4_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vf_f64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f64.f64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -375,7 +341,6 @@ vbool16_t test_vmfeq_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmfeq_vf_f64m4_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vv_f64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -387,7 +352,6 @@ vbool8_t test_vmfeq_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmfeq_vv_f64m8_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vf_f64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f64.f64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfge.c
index cc8923c5cddd7..f8592ca28d72f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfge.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfge.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vv_f32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vbool64_t test_vmfge_vv_f32mf2_b64 (vfloat32mf2_t op1, vfloat32mf2_t op2, size_t
   return vmfge_vv_f32mf2_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vf_f32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -25,7 +23,6 @@ vbool64_t test_vmfge_vf_f32mf2_b64 (vfloat32mf2_t op1, float op2, size_t vl) {
   return vmfge_vf_f32mf2_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vv_f32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -35,7 +32,6 @@ vbool32_t test_vmfge_vv_f32m1_b32 (vfloat32m1_t op1, vfloat32m1_t op2, size_t vl
   return vmfge_vv_f32m1_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vf_f32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -45,7 +41,6 @@ vbool32_t test_vmfge_vf_f32m1_b32 (vfloat32m1_t op1, float op2, size_t vl) {
   return vmfge_vf_f32m1_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vv_f32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -55,7 +50,6 @@ vbool16_t test_vmfge_vv_f32m2_b16 (vfloat32m2_t op1, vfloat32m2_t op2, size_t vl
   return vmfge_vv_f32m2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vf_f32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -65,7 +59,6 @@ vbool16_t test_vmfge_vf_f32m2_b16 (vfloat32m2_t op1, float op2, size_t vl) {
   return vmfge_vf_f32m2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vv_f32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -75,7 +68,6 @@ vbool8_t test_vmfge_vv_f32m4_b8 (vfloat32m4_t op1, vfloat32m4_t op2, size_t vl)
   return vmfge_vv_f32m4_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vf_f32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -85,7 +77,6 @@ vbool8_t test_vmfge_vf_f32m4_b8 (vfloat32m4_t op1, float op2, size_t vl) {
   return vmfge_vf_f32m4_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vv_f32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -95,7 +86,6 @@ vbool4_t test_vmfge_vv_f32m8_b4 (vfloat32m8_t op1, vfloat32m8_t op2, size_t vl)
   return vmfge_vv_f32m8_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vf_f32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -105,7 +95,6 @@ vbool4_t test_vmfge_vf_f32m8_b4 (vfloat32m8_t op1, float op2, size_t vl) {
   return vmfge_vf_f32m8_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vv_f64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -115,7 +104,6 @@ vbool64_t test_vmfge_vv_f64m1_b64 (vfloat64m1_t op1, vfloat64m1_t op2, size_t vl
   return vmfge_vv_f64m1_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vf_f64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -125,7 +113,6 @@ vbool64_t test_vmfge_vf_f64m1_b64 (vfloat64m1_t op1, double op2, size_t vl) {
   return vmfge_vf_f64m1_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vv_f64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -135,7 +122,6 @@ vbool32_t test_vmfge_vv_f64m2_b32 (vfloat64m2_t op1, vfloat64m2_t op2, size_t vl
   return vmfge_vv_f64m2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vf_f64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -145,7 +131,6 @@ vbool32_t test_vmfge_vf_f64m2_b32 (vfloat64m2_t op1, double op2, size_t vl) {
   return vmfge_vf_f64m2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vv_f64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -155,7 +140,6 @@ vbool16_t test_vmfge_vv_f64m4_b16 (vfloat64m4_t op1, vfloat64m4_t op2, size_t vl
   return vmfge_vv_f64m4_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vf_f64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -165,7 +149,6 @@ vbool16_t test_vmfge_vf_f64m4_b16 (vfloat64m4_t op1, double op2, size_t vl) {
   return vmfge_vf_f64m4_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vv_f64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -175,7 +158,6 @@ vbool8_t test_vmfge_vv_f64m8_b8 (vfloat64m8_t op1, vfloat64m8_t op2, size_t vl)
   return vmfge_vv_f64m8_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vf_f64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -185,7 +167,6 @@ vbool8_t test_vmfge_vf_f64m8_b8 (vfloat64m8_t op1, double op2, size_t vl) {
   return vmfge_vf_f64m8_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vv_f32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -195,7 +176,6 @@ vbool64_t test_vmfge_vv_f32mf2_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloa
   return vmfge_vv_f32mf2_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vf_f32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32.f32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -205,7 +185,6 @@ vbool64_t test_vmfge_vf_f32mf2_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloa
   return vmfge_vf_f32mf2_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vv_f32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -215,7 +194,6 @@ vbool32_t test_vmfge_vv_f32m1_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat
   return vmfge_vv_f32m1_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vf_f32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f32.f32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -225,7 +203,6 @@ vbool32_t test_vmfge_vf_f32m1_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat
   return vmfge_vf_f32m1_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vv_f32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -235,7 +212,6 @@ vbool16_t test_vmfge_vv_f32m2_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat
   return vmfge_vv_f32m2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vf_f32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f32.f32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -245,7 +221,6 @@ vbool16_t test_vmfge_vf_f32m2_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat
   return vmfge_vf_f32m2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vv_f32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -255,7 +230,6 @@ vbool8_t test_vmfge_vv_f32m4_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat32m4
   return vmfge_vv_f32m4_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vf_f32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f32.f32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -265,7 +239,6 @@ vbool8_t test_vmfge_vf_f32m4_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat32m4
   return vmfge_vf_f32m4_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vv_f32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -275,7 +248,6 @@ vbool4_t test_vmfge_vv_f32m8_b4_m (vbool4_t mask, vbool4_t maskedoff, vfloat32m8
   return vmfge_vv_f32m8_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vf_f32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16f32.f32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -285,7 +257,6 @@ vbool4_t test_vmfge_vf_f32m8_b4_m (vbool4_t mask, vbool4_t maskedoff, vfloat32m8
   return vmfge_vf_f32m8_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vv_f64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -295,7 +266,6 @@ vbool64_t test_vmfge_vv_f64m1_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloat
   return vmfge_vv_f64m1_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vf_f64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f64.f64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -305,7 +275,6 @@ vbool64_t test_vmfge_vf_f64m1_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloat
   return vmfge_vf_f64m1_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vv_f64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -315,7 +284,6 @@ vbool32_t test_vmfge_vv_f64m2_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat
   return vmfge_vv_f64m2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vf_f64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f64.f64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -325,7 +293,6 @@ vbool32_t test_vmfge_vf_f64m2_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat
   return vmfge_vf_f64m2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vv_f64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -335,7 +302,6 @@ vbool16_t test_vmfge_vv_f64m4_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat
   return vmfge_vv_f64m4_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vf_f64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f64.f64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -345,7 +311,6 @@ vbool16_t test_vmfge_vf_f64m4_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat
   return vmfge_vf_f64m4_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vv_f64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -355,7 +320,6 @@ vbool8_t test_vmfge_vv_f64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat64m8
   return vmfge_vv_f64m8_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vf_f64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f64.f64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfgt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfgt.c
index 8517f133dbd84..802c6d1f4389e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfgt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfgt.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vv_f32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vbool64_t test_vmfgt_vv_f32mf2_b64 (vfloat32mf2_t op1, vfloat32mf2_t op2, size_t
   return vmfgt_vv_f32mf2_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vf_f32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -25,7 +23,6 @@ vbool64_t test_vmfgt_vf_f32mf2_b64 (vfloat32mf2_t op1, float op2, size_t vl) {
   return vmfgt_vf_f32mf2_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vv_f32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -35,7 +32,6 @@ vbool32_t test_vmfgt_vv_f32m1_b32 (vfloat32m1_t op1, vfloat32m1_t op2, size_t vl
   return vmfgt_vv_f32m1_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vf_f32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -45,7 +41,6 @@ vbool32_t test_vmfgt_vf_f32m1_b32 (vfloat32m1_t op1, float op2, size_t vl) {
   return vmfgt_vf_f32m1_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vv_f32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -55,7 +50,6 @@ vbool16_t test_vmfgt_vv_f32m2_b16 (vfloat32m2_t op1, vfloat32m2_t op2, size_t vl
   return vmfgt_vv_f32m2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vf_f32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -65,7 +59,6 @@ vbool16_t test_vmfgt_vf_f32m2_b16 (vfloat32m2_t op1, float op2, size_t vl) {
   return vmfgt_vf_f32m2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vv_f32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -75,7 +68,6 @@ vbool8_t test_vmfgt_vv_f32m4_b8 (vfloat32m4_t op1, vfloat32m4_t op2, size_t vl)
   return vmfgt_vv_f32m4_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vf_f32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -85,7 +77,6 @@ vbool8_t test_vmfgt_vf_f32m4_b8 (vfloat32m4_t op1, float op2, size_t vl) {
   return vmfgt_vf_f32m4_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vv_f32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -95,7 +86,6 @@ vbool4_t test_vmfgt_vv_f32m8_b4 (vfloat32m8_t op1, vfloat32m8_t op2, size_t vl)
   return vmfgt_vv_f32m8_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vf_f32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -105,7 +95,6 @@ vbool4_t test_vmfgt_vf_f32m8_b4 (vfloat32m8_t op1, float op2, size_t vl) {
   return vmfgt_vf_f32m8_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vv_f64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -115,7 +104,6 @@ vbool64_t test_vmfgt_vv_f64m1_b64 (vfloat64m1_t op1, vfloat64m1_t op2, size_t vl
   return vmfgt_vv_f64m1_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vf_f64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -125,7 +113,6 @@ vbool64_t test_vmfgt_vf_f64m1_b64 (vfloat64m1_t op1, double op2, size_t vl) {
   return vmfgt_vf_f64m1_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vv_f64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -135,7 +122,6 @@ vbool32_t test_vmfgt_vv_f64m2_b32 (vfloat64m2_t op1, vfloat64m2_t op2, size_t vl
   return vmfgt_vv_f64m2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vf_f64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -145,7 +131,6 @@ vbool32_t test_vmfgt_vf_f64m2_b32 (vfloat64m2_t op1, double op2, size_t vl) {
   return vmfgt_vf_f64m2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vv_f64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -155,7 +140,6 @@ vbool16_t test_vmfgt_vv_f64m4_b16 (vfloat64m4_t op1, vfloat64m4_t op2, size_t vl
   return vmfgt_vv_f64m4_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vf_f64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -165,7 +149,6 @@ vbool16_t test_vmfgt_vf_f64m4_b16 (vfloat64m4_t op1, double op2, size_t vl) {
   return vmfgt_vf_f64m4_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vv_f64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -175,7 +158,6 @@ vbool8_t test_vmfgt_vv_f64m8_b8 (vfloat64m8_t op1, vfloat64m8_t op2, size_t vl)
   return vmfgt_vv_f64m8_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vf_f64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -185,7 +167,6 @@ vbool8_t test_vmfgt_vf_f64m8_b8 (vfloat64m8_t op1, double op2, size_t vl) {
   return vmfgt_vf_f64m8_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vv_f32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -195,7 +176,6 @@ vbool64_t test_vmfgt_vv_f32mf2_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloa
   return vmfgt_vv_f32mf2_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vf_f32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f32.f32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -205,7 +185,6 @@ vbool64_t test_vmfgt_vf_f32mf2_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloa
   return vmfgt_vf_f32mf2_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vv_f32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -215,7 +194,6 @@ vbool32_t test_vmfgt_vv_f32m1_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat
   return vmfgt_vv_f32m1_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vf_f32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f32.f32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -225,7 +203,6 @@ vbool32_t test_vmfgt_vf_f32m1_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat
   return vmfgt_vf_f32m1_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vv_f32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -235,7 +212,6 @@ vbool16_t test_vmfgt_vv_f32m2_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat
   return vmfgt_vv_f32m2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vf_f32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f32.f32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -245,7 +221,6 @@ vbool16_t test_vmfgt_vf_f32m2_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat
   return vmfgt_vf_f32m2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vv_f32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -255,7 +230,6 @@ vbool8_t test_vmfgt_vv_f32m4_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat32m4
   return vmfgt_vv_f32m4_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vf_f32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f32.f32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -265,7 +239,6 @@ vbool8_t test_vmfgt_vf_f32m4_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat32m4
   return vmfgt_vf_f32m4_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vv_f32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -275,7 +248,6 @@ vbool4_t test_vmfgt_vv_f32m8_b4_m (vbool4_t mask, vbool4_t maskedoff, vfloat32m8
   return vmfgt_vv_f32m8_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vf_f32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16f32.f32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -285,7 +257,6 @@ vbool4_t test_vmfgt_vf_f32m8_b4_m (vbool4_t mask, vbool4_t maskedoff, vfloat32m8
   return vmfgt_vf_f32m8_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vv_f64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -295,7 +266,6 @@ vbool64_t test_vmfgt_vv_f64m1_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloat
   return vmfgt_vv_f64m1_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vf_f64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f64.f64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -305,7 +275,6 @@ vbool64_t test_vmfgt_vf_f64m1_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloat
   return vmfgt_vf_f64m1_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vv_f64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -315,7 +284,6 @@ vbool32_t test_vmfgt_vv_f64m2_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat
   return vmfgt_vv_f64m2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vf_f64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f64.f64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -325,7 +293,6 @@ vbool32_t test_vmfgt_vf_f64m2_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat
   return vmfgt_vf_f64m2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vv_f64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -335,7 +302,6 @@ vbool16_t test_vmfgt_vv_f64m4_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat
   return vmfgt_vv_f64m4_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vf_f64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f64.f64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -345,7 +311,6 @@ vbool16_t test_vmfgt_vf_f64m4_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat
   return vmfgt_vf_f64m4_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vv_f64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -355,7 +320,6 @@ vbool8_t test_vmfgt_vv_f64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat64m8
   return vmfgt_vv_f64m8_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vf_f64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f64.f64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfle.c
index bd18398b80168..5aee3d0b71189 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfle.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfle.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vv_f32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vbool64_t test_vmfle_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2,
   return vmfle_vv_f32mf2_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vf_f32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -26,7 +24,6 @@ vbool64_t test_vmfle_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) {
   return vmfle_vf_f32mf2_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vv_f32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -37,7 +34,6 @@ vbool32_t test_vmfle_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2,
   return vmfle_vv_f32m1_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vf_f32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -47,7 +43,6 @@ vbool32_t test_vmfle_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) {
   return vmfle_vf_f32m1_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vv_f32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -58,7 +53,6 @@ vbool16_t test_vmfle_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2,
   return vmfle_vv_f32m2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vf_f32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -68,7 +62,6 @@ vbool16_t test_vmfle_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) {
   return vmfle_vf_f32m2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vv_f32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -78,7 +71,6 @@ vbool8_t test_vmfle_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
   return vmfle_vv_f32m4_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vf_f32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -88,7 +80,6 @@ vbool8_t test_vmfle_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) {
   return vmfle_vf_f32m4_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vv_f32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -98,7 +89,6 @@ vbool4_t test_vmfle_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
   return vmfle_vv_f32m8_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vf_f32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -108,7 +98,6 @@ vbool4_t test_vmfle_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) {
   return vmfle_vf_f32m8_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vv_f64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -119,7 +108,6 @@ vbool64_t test_vmfle_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2,
   return vmfle_vv_f64m1_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vf_f64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -129,7 +117,6 @@ vbool64_t test_vmfle_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) {
   return vmfle_vf_f64m1_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vv_f64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -140,7 +127,6 @@ vbool32_t test_vmfle_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2,
   return vmfle_vv_f64m2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vf_f64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -150,7 +136,6 @@ vbool32_t test_vmfle_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) {
   return vmfle_vf_f64m2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vv_f64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -161,7 +146,6 @@ vbool16_t test_vmfle_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2,
   return vmfle_vv_f64m4_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vf_f64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -171,7 +155,6 @@ vbool16_t test_vmfle_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) {
   return vmfle_vf_f64m4_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vv_f64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -181,7 +164,6 @@ vbool8_t test_vmfle_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
   return vmfle_vv_f64m8_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vf_f64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -191,7 +173,6 @@ vbool8_t test_vmfle_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) {
   return vmfle_vf_f64m8_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vv_f32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -203,7 +184,6 @@ vbool64_t test_vmfle_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmfle_vv_f32mf2_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vf_f32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.f32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -214,7 +194,6 @@ vbool64_t test_vmfle_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmfle_vf_f32mf2_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vv_f32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -226,7 +205,6 @@ vbool32_t test_vmfle_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmfle_vv_f32m1_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vf_f32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f32.f32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -237,7 +215,6 @@ vbool32_t test_vmfle_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmfle_vf_f32m1_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vv_f32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -249,7 +226,6 @@ vbool16_t test_vmfle_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmfle_vv_f32m2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vf_f32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f32.f32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -260,7 +236,6 @@ vbool16_t test_vmfle_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmfle_vf_f32m2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vv_f32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -272,7 +247,6 @@ vbool8_t test_vmfle_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmfle_vv_f32m4_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vf_f32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f32.f32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -283,7 +257,6 @@ vbool8_t test_vmfle_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmfle_vf_f32m4_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vv_f32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -295,7 +268,6 @@ vbool4_t test_vmfle_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmfle_vv_f32m8_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vf_f32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16f32.f32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -306,7 +278,6 @@ vbool4_t test_vmfle_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmfle_vf_f32m8_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vv_f64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -318,7 +289,6 @@ vbool64_t test_vmfle_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmfle_vv_f64m1_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vf_f64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f64.f64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -329,7 +299,6 @@ vbool64_t test_vmfle_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmfle_vf_f64m1_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vv_f64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -341,7 +310,6 @@ vbool32_t test_vmfle_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmfle_vv_f64m2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vf_f64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f64.f64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -352,7 +320,6 @@ vbool32_t test_vmfle_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmfle_vf_f64m2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vv_f64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -364,7 +331,6 @@ vbool16_t test_vmfle_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmfle_vv_f64m4_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vf_f64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f64.f64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -375,7 +341,6 @@ vbool16_t test_vmfle_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmfle_vf_f64m4_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vv_f64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -387,7 +352,6 @@ vbool8_t test_vmfle_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmfle_vv_f64m8_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vf_f64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f64.f64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmflt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmflt.c
index 47858dab7eacd..6f76063a03cfd 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmflt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmflt.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vv_f32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vbool64_t test_vmflt_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2,
   return vmflt_vv_f32mf2_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vf_f32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -26,7 +24,6 @@ vbool64_t test_vmflt_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) {
   return vmflt_vf_f32mf2_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vv_f32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -37,7 +34,6 @@ vbool32_t test_vmflt_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2,
   return vmflt_vv_f32m1_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vf_f32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -47,7 +43,6 @@ vbool32_t test_vmflt_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) {
   return vmflt_vf_f32m1_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vv_f32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -58,7 +53,6 @@ vbool16_t test_vmflt_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2,
   return vmflt_vv_f32m2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vf_f32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -68,7 +62,6 @@ vbool16_t test_vmflt_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) {
   return vmflt_vf_f32m2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vv_f32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -78,7 +71,6 @@ vbool8_t test_vmflt_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
   return vmflt_vv_f32m4_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vf_f32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -88,7 +80,6 @@ vbool8_t test_vmflt_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) {
   return vmflt_vf_f32m4_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vv_f32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -98,7 +89,6 @@ vbool4_t test_vmflt_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
   return vmflt_vv_f32m8_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vf_f32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -108,7 +98,6 @@ vbool4_t test_vmflt_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) {
   return vmflt_vf_f32m8_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vv_f64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -119,7 +108,6 @@ vbool64_t test_vmflt_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2,
   return vmflt_vv_f64m1_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vf_f64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -129,7 +117,6 @@ vbool64_t test_vmflt_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) {
   return vmflt_vf_f64m1_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vv_f64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -140,7 +127,6 @@ vbool32_t test_vmflt_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2,
   return vmflt_vv_f64m2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vf_f64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -150,7 +136,6 @@ vbool32_t test_vmflt_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) {
   return vmflt_vf_f64m2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vv_f64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -161,7 +146,6 @@ vbool16_t test_vmflt_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2,
   return vmflt_vv_f64m4_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vf_f64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -171,7 +155,6 @@ vbool16_t test_vmflt_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) {
   return vmflt_vf_f64m4_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vv_f64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -181,7 +164,6 @@ vbool8_t test_vmflt_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
   return vmflt_vv_f64m8_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vf_f64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -191,7 +173,6 @@ vbool8_t test_vmflt_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) {
   return vmflt_vf_f64m8_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vv_f32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -203,7 +184,6 @@ vbool64_t test_vmflt_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmflt_vv_f32mf2_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vf_f32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32.f32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -214,7 +194,6 @@ vbool64_t test_vmflt_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmflt_vf_f32mf2_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vv_f32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -226,7 +205,6 @@ vbool32_t test_vmflt_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmflt_vv_f32m1_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vf_f32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f32.f32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -237,7 +215,6 @@ vbool32_t test_vmflt_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmflt_vf_f32m1_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vv_f32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -249,7 +226,6 @@ vbool16_t test_vmflt_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmflt_vv_f32m2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vf_f32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f32.f32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -260,7 +236,6 @@ vbool16_t test_vmflt_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmflt_vf_f32m2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vv_f32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -272,7 +247,6 @@ vbool8_t test_vmflt_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmflt_vv_f32m4_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vf_f32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f32.f32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -283,7 +257,6 @@ vbool8_t test_vmflt_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmflt_vf_f32m4_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vv_f32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -295,7 +268,6 @@ vbool4_t test_vmflt_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmflt_vv_f32m8_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vf_f32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16f32.f32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -306,7 +278,6 @@ vbool4_t test_vmflt_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmflt_vf_f32m8_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vv_f64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -318,7 +289,6 @@ vbool64_t test_vmflt_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmflt_vv_f64m1_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vf_f64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f64.f64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -329,7 +299,6 @@ vbool64_t test_vmflt_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmflt_vf_f64m1_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vv_f64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -341,7 +310,6 @@ vbool32_t test_vmflt_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmflt_vv_f64m2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vf_f64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f64.f64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -352,7 +320,6 @@ vbool32_t test_vmflt_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmflt_vf_f64m2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vv_f64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -364,7 +331,6 @@ vbool16_t test_vmflt_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmflt_vv_f64m4_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vf_f64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f64.f64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -375,7 +341,6 @@ vbool16_t test_vmflt_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmflt_vf_f64m4_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vv_f64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -387,7 +352,6 @@ vbool8_t test_vmflt_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmflt_vv_f64m8_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vf_f64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f64.f64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfne.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfne.c
index 5593b67a133c1..ec2468e3ea66b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfne.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfne.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vv_f32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vbool64_t test_vmfne_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2,
   return vmfne_vv_f32mf2_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vf_f32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -26,7 +24,6 @@ vbool64_t test_vmfne_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) {
   return vmfne_vf_f32mf2_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vv_f32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -37,7 +34,6 @@ vbool32_t test_vmfne_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2,
   return vmfne_vv_f32m1_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vf_f32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -47,7 +43,6 @@ vbool32_t test_vmfne_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) {
   return vmfne_vf_f32m1_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vv_f32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -58,7 +53,6 @@ vbool16_t test_vmfne_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2,
   return vmfne_vv_f32m2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vf_f32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -68,7 +62,6 @@ vbool16_t test_vmfne_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) {
   return vmfne_vf_f32m2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vv_f32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -78,7 +71,6 @@ vbool8_t test_vmfne_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
   return vmfne_vv_f32m4_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vf_f32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -88,7 +80,6 @@ vbool8_t test_vmfne_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) {
   return vmfne_vf_f32m4_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vv_f32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -98,7 +89,6 @@ vbool4_t test_vmfne_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
   return vmfne_vv_f32m8_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vf_f32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -108,7 +98,6 @@ vbool4_t test_vmfne_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) {
   return vmfne_vf_f32m8_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vv_f64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -119,7 +108,6 @@ vbool64_t test_vmfne_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2,
   return vmfne_vv_f64m1_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vf_f64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -129,7 +117,6 @@ vbool64_t test_vmfne_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) {
   return vmfne_vf_f64m1_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vv_f64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -140,7 +127,6 @@ vbool32_t test_vmfne_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2,
   return vmfne_vv_f64m2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vf_f64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -150,7 +136,6 @@ vbool32_t test_vmfne_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) {
   return vmfne_vf_f64m2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vv_f64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -161,7 +146,6 @@ vbool16_t test_vmfne_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2,
   return vmfne_vv_f64m4_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vf_f64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -171,7 +155,6 @@ vbool16_t test_vmfne_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) {
   return vmfne_vf_f64m4_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vv_f64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -181,7 +164,6 @@ vbool8_t test_vmfne_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
   return vmfne_vv_f64m8_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vf_f64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -191,7 +173,6 @@ vbool8_t test_vmfne_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) {
   return vmfne_vf_f64m8_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vv_f32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -203,7 +184,6 @@ vbool64_t test_vmfne_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmfne_vv_f32mf2_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vf_f32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32.f32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -214,7 +194,6 @@ vbool64_t test_vmfne_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmfne_vf_f32mf2_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vv_f32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -226,7 +205,6 @@ vbool32_t test_vmfne_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmfne_vv_f32m1_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vf_f32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f32.f32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -237,7 +215,6 @@ vbool32_t test_vmfne_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmfne_vf_f32m1_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vv_f32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -249,7 +226,6 @@ vbool16_t test_vmfne_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmfne_vv_f32m2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vf_f32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f32.f32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -260,7 +236,6 @@ vbool16_t test_vmfne_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmfne_vf_f32m2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vv_f32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -272,7 +247,6 @@ vbool8_t test_vmfne_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmfne_vv_f32m4_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vf_f32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f32.f32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -283,7 +257,6 @@ vbool8_t test_vmfne_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmfne_vf_f32m4_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vv_f32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -295,7 +268,6 @@ vbool4_t test_vmfne_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmfne_vv_f32m8_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vf_f32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16f32.f32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -306,7 +278,6 @@ vbool4_t test_vmfne_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmfne_vf_f32m8_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vv_f64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -318,7 +289,6 @@ vbool64_t test_vmfne_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmfne_vv_f64m1_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vf_f64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f64.f64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -329,7 +299,6 @@ vbool64_t test_vmfne_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmfne_vf_f64m1_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vv_f64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -341,7 +310,6 @@ vbool32_t test_vmfne_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmfne_vv_f64m2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vf_f64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f64.f64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -352,7 +320,6 @@ vbool32_t test_vmfne_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmfne_vf_f64m2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vv_f64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -364,7 +331,6 @@ vbool16_t test_vmfne_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmfne_vv_f64m4_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vf_f64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f64.f64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -375,7 +341,6 @@ vbool16_t test_vmfne_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmfne_vf_f64m4_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vv_f64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -387,7 +352,6 @@ vbool8_t test_vmfne_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmfne_vv_f64m8_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vf_f64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f64.f64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmin.c
index 05a723e0f4223..fe50e2b9492e8 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmin.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmin.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint8mf8_t test_vmin_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
   return vmin_vv_i8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint8mf8_t test_vmin_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
   return vmin_vx_i8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint8mf4_t test_vmin_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
   return vmin_vv_i8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint8mf4_t test_vmin_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
   return vmin_vx_i8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint8mf2_t test_vmin_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
   return vmin_vv_i8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint8mf2_t test_vmin_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
   return vmin_vx_i8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vint8m1_t test_vmin_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
   return vmin_vv_i8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vint8m1_t test_vmin_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
   return vmin_vx_i8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vint8m2_t test_vmin_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
   return vmin_vv_i8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vint8m2_t test_vmin_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
   return vmin_vx_i8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vint8m4_t test_vmin_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
   return vmin_vv_i8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vint8m4_t test_vmin_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
   return vmin_vx_i8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vint8m8_t test_vmin_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
   return vmin_vv_i8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vint8m8_t test_vmin_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
   return vmin_vx_i8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vint16mf4_t test_vmin_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
   return vmin_vv_i16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vint16mf4_t test_vmin_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
   return vmin_vx_i16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vint16mf2_t test_vmin_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
   return vmin_vv_i16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vint16mf2_t test_vmin_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
   return vmin_vx_i16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vint16m1_t test_vmin_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
   return vmin_vv_i16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vint16m1_t test_vmin_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
   return vmin_vx_i16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vint16m2_t test_vmin_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
   return vmin_vv_i16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vint16m2_t test_vmin_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
   return vmin_vx_i16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vint16m4_t test_vmin_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
   return vmin_vv_i16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vint16m4_t test_vmin_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
   return vmin_vx_i16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vint16m8_t test_vmin_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
   return vmin_vv_i16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vint16m8_t test_vmin_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
   return vmin_vx_i16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vint32mf2_t test_vmin_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
   return vmin_vv_i32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vint32mf2_t test_vmin_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
   return vmin_vx_i32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vint32m1_t test_vmin_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
   return vmin_vv_i32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -304,7 +274,6 @@ vint32m1_t test_vmin_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
   return vmin_vx_i32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -314,7 +283,6 @@ vint32m2_t test_vmin_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
   return vmin_vv_i32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -324,7 +292,6 @@ vint32m2_t test_vmin_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
   return vmin_vx_i32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -334,7 +301,6 @@ vint32m4_t test_vmin_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
   return vmin_vv_i32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -344,7 +310,6 @@ vint32m4_t test_vmin_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
   return vmin_vx_i32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -354,7 +319,6 @@ vint32m8_t test_vmin_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
   return vmin_vv_i32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -364,7 +328,6 @@ vint32m8_t test_vmin_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
   return vmin_vx_i32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -374,7 +337,6 @@ vint64m1_t test_vmin_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
   return vmin_vv_i64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -384,7 +346,6 @@ vint64m1_t test_vmin_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
   return vmin_vx_i64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -394,7 +355,6 @@ vint64m2_t test_vmin_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
   return vmin_vv_i64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -404,7 +364,6 @@ vint64m2_t test_vmin_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
   return vmin_vx_i64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -414,7 +373,6 @@ vint64m4_t test_vmin_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
   return vmin_vv_i64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -424,7 +382,6 @@ vint64m4_t test_vmin_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
   return vmin_vx_i64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -434,7 +391,6 @@ vint64m8_t test_vmin_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
   return vmin_vv_i64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -444,7 +400,6 @@ vint64m8_t test_vmin_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
   return vmin_vx_i64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -454,7 +409,6 @@ vuint8mf8_t test_vminu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
   return vminu_vv_u8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -464,7 +418,6 @@ vuint8mf8_t test_vminu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
   return vminu_vx_u8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -474,7 +427,6 @@ vuint8mf4_t test_vminu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
   return vminu_vv_u8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -484,7 +436,6 @@ vuint8mf4_t test_vminu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
   return vminu_vx_u8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -494,7 +445,6 @@ vuint8mf2_t test_vminu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
   return vminu_vv_u8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -504,7 +454,6 @@ vuint8mf2_t test_vminu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
   return vminu_vx_u8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -514,7 +463,6 @@ vuint8m1_t test_vminu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vminu_vv_u8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -524,7 +472,6 @@ vuint8m1_t test_vminu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
   return vminu_vx_u8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -534,7 +481,6 @@ vuint8m2_t test_vminu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vminu_vv_u8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -544,7 +490,6 @@ vuint8m2_t test_vminu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
   return vminu_vx_u8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -554,7 +499,6 @@ vuint8m4_t test_vminu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vminu_vv_u8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -564,7 +508,6 @@ vuint8m4_t test_vminu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
   return vminu_vx_u8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -574,7 +517,6 @@ vuint8m8_t test_vminu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
   return vminu_vv_u8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -584,7 +526,6 @@ vuint8m8_t test_vminu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
   return vminu_vx_u8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -594,7 +535,6 @@ vuint16mf4_t test_vminu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
   return vminu_vv_u16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -604,7 +544,6 @@ vuint16mf4_t test_vminu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
   return vminu_vx_u16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -614,7 +553,6 @@ vuint16mf2_t test_vminu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
   return vminu_vv_u16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -624,7 +562,6 @@ vuint16mf2_t test_vminu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
   return vminu_vx_u16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -634,7 +571,6 @@ vuint16m1_t test_vminu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
   return vminu_vv_u16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -644,7 +580,6 @@ vuint16m1_t test_vminu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
   return vminu_vx_u16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -654,7 +589,6 @@ vuint16m2_t test_vminu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vminu_vv_u16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -664,7 +598,6 @@ vuint16m2_t test_vminu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
   return vminu_vx_u16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -674,7 +607,6 @@ vuint16m4_t test_vminu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vminu_vv_u16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -684,7 +616,6 @@ vuint16m4_t test_vminu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
   return vminu_vx_u16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -694,7 +625,6 @@ vuint16m8_t test_vminu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return vminu_vv_u16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -704,7 +634,6 @@ vuint16m8_t test_vminu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
   return vminu_vx_u16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -714,7 +643,6 @@ vuint32mf2_t test_vminu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
   return vminu_vv_u32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -724,7 +652,6 @@ vuint32mf2_t test_vminu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
   return vminu_vx_u32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -734,7 +661,6 @@ vuint32m1_t test_vminu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
   return vminu_vv_u32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -744,7 +670,6 @@ vuint32m1_t test_vminu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
   return vminu_vx_u32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -754,7 +679,6 @@ vuint32m2_t test_vminu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
   return vminu_vv_u32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -764,7 +688,6 @@ vuint32m2_t test_vminu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vminu_vx_u32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -774,7 +697,6 @@ vuint32m4_t test_vminu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vminu_vv_u32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -784,7 +706,6 @@ vuint32m4_t test_vminu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vminu_vx_u32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -794,7 +715,6 @@ vuint32m8_t test_vminu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
   return vminu_vv_u32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -804,7 +724,6 @@ vuint32m8_t test_vminu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
   return vminu_vx_u32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -814,7 +733,6 @@ vuint64m1_t test_vminu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
   return vminu_vv_u64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -824,7 +742,6 @@ vuint64m1_t test_vminu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
   return vminu_vx_u64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -834,7 +751,6 @@ vuint64m2_t test_vminu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
   return vminu_vv_u64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -844,7 +760,6 @@ vuint64m2_t test_vminu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
   return vminu_vx_u64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -854,7 +769,6 @@ vuint64m4_t test_vminu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
   return vminu_vv_u64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -864,7 +778,6 @@ vuint64m4_t test_vminu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
   return vminu_vx_u64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -874,7 +787,6 @@ vuint64m8_t test_vminu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
   return vminu_vv_u64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -884,7 +796,6 @@ vuint64m8_t test_vminu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
   return vminu_vx_u64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -894,7 +805,6 @@ vint8mf8_t test_vmin_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t
   return vmin_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -904,7 +814,6 @@ vint8mf8_t test_vmin_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t
   return vmin_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmin.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -914,7 +823,6 @@ vint8mf4_t test_vmin_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t
   return vmin_vv_i8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmin.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -924,7 +832,6 @@ vint8mf4_t test_vmin_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t
   return vmin_vx_i8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmin.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -934,7 +841,6 @@ vint8mf2_t test_vmin_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t
   return vmin_vv_i8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmin.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -944,7 +850,6 @@ vint8mf2_t test_vmin_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t
   return vmin_vx_i8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmin.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -954,7 +859,6 @@ vint8m1_t test_vmin_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1,
   return vmin_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmin.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -964,7 +868,6 @@ vint8m1_t test_vmin_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1,
   return vmin_vx_i8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmin.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -974,7 +877,6 @@ vint8m2_t test_vmin_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1,
   return vmin_vv_i8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmin.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -984,7 +886,6 @@ vint8m2_t test_vmin_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1,
   return vmin_vx_i8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmin.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -994,7 +895,6 @@ vint8m4_t test_vmin_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1,
   return vmin_vv_i8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmin.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1004,7 +904,6 @@ vint8m4_t test_vmin_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1,
   return vmin_vx_i8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmin.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1014,7 +913,6 @@ vint8m8_t test_vmin_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1,
   return vmin_vv_i8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmin.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1024,7 +922,6 @@ vint8m8_t test_vmin_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1,
   return vmin_vx_i8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmin.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1034,7 +931,6 @@ vint16mf4_t test_vmin_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16m
   return vmin_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmin.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1044,7 +940,6 @@ vint16mf4_t test_vmin_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16m
   return vmin_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmin.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1054,7 +949,6 @@ vint16mf2_t test_vmin_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16m
   return vmin_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmin.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1064,7 +958,6 @@ vint16mf2_t test_vmin_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16m
   return vmin_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmin.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1074,7 +967,6 @@ vint16m1_t test_vmin_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t
   return vmin_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmin.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1084,7 +976,6 @@ vint16m1_t test_vmin_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t
   return vmin_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmin.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1094,7 +985,6 @@ vint16m2_t test_vmin_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
   return vmin_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmin.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1104,7 +994,6 @@ vint16m2_t test_vmin_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
   return vmin_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmin.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1114,7 +1003,6 @@ vint16m4_t test_vmin_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
   return vmin_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmin.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1124,7 +1012,6 @@ vint16m4_t test_vmin_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
   return vmin_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmin.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1134,7 +1021,6 @@ vint16m8_t test_vmin_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
   return vmin_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmin.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1144,7 +1030,6 @@ vint16m8_t test_vmin_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
   return vmin_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1154,7 +1039,6 @@ vint32mf2_t test_vmin_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32m
   return vmin_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1164,7 +1048,6 @@ vint32mf2_t test_vmin_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32m
   return vmin_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmin.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1174,7 +1057,6 @@ vint32m1_t test_vmin_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t
   return vmin_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmin.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1184,7 +1066,6 @@ vint32m1_t test_vmin_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t
   return vmin_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmin.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1194,7 +1075,6 @@ vint32m2_t test_vmin_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t
   return vmin_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmin.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1204,7 +1084,6 @@ vint32m2_t test_vmin_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t
   return vmin_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmin.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1214,7 +1093,6 @@ vint32m4_t test_vmin_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
   return vmin_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmin.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1224,7 +1102,6 @@ vint32m4_t test_vmin_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
   return vmin_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmin.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1234,7 +1111,6 @@ vint32m8_t test_vmin_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
   return vmin_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmin.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1244,7 +1120,6 @@ vint32m8_t test_vmin_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
   return vmin_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmin.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1254,7 +1129,6 @@ vint64m1_t test_vmin_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t
   return vmin_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmin.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1264,7 +1138,6 @@ vint64m1_t test_vmin_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t
   return vmin_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmin.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1274,7 +1147,6 @@ vint64m2_t test_vmin_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t
   return vmin_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmin.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1284,7 +1156,6 @@ vint64m2_t test_vmin_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t
   return vmin_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmin.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1294,7 +1165,6 @@ vint64m4_t test_vmin_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t
   return vmin_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmin.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1304,7 +1174,6 @@ vint64m4_t test_vmin_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t
   return vmin_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmin.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1314,7 +1183,6 @@ vint64m8_t test_vmin_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
   return vmin_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmin.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1324,7 +1192,6 @@ vint64m8_t test_vmin_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
   return vmin_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vminu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1334,7 +1201,6 @@ vuint8mf8_t test_vminu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8m
   return vminu_vv_u8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vminu.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1344,7 +1210,6 @@ vuint8mf8_t test_vminu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8m
   return vminu_vx_u8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vminu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1354,7 +1219,6 @@ vuint8mf4_t test_vminu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8m
   return vminu_vv_u8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vminu.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1364,7 +1228,6 @@ vuint8mf4_t test_vminu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8m
   return vminu_vx_u8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vminu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1374,7 +1237,6 @@ vuint8mf2_t test_vminu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8m
   return vminu_vv_u8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vminu.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1384,7 +1246,6 @@ vuint8mf2_t test_vminu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8m
   return vminu_vx_u8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vminu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1394,7 +1255,6 @@ vuint8m1_t test_vminu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
   return vminu_vv_u8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vminu.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1404,7 +1264,6 @@ vuint8m1_t test_vminu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
   return vminu_vx_u8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vminu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1414,7 +1273,6 @@ vuint8m2_t test_vminu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
   return vminu_vv_u8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vminu.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1424,7 +1282,6 @@ vuint8m2_t test_vminu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
   return vminu_vx_u8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vminu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1434,7 +1291,6 @@ vuint8m4_t test_vminu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
   return vminu_vv_u8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vminu.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1444,7 +1300,6 @@ vuint8m4_t test_vminu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
   return vminu_vx_u8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vminu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1454,7 +1309,6 @@ vuint8m8_t test_vminu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
   return vminu_vv_u8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vminu.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1464,7 +1318,6 @@ vuint8m8_t test_vminu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
   return vminu_vx_u8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vminu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1474,7 +1327,6 @@ vuint16mf4_t test_vminu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuin
   return vminu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vminu.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1484,7 +1336,6 @@ vuint16mf4_t test_vminu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuin
   return vminu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vminu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1494,7 +1345,6 @@ vuint16mf2_t test_vminu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuin
   return vminu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vminu.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1504,7 +1354,6 @@ vuint16mf2_t test_vminu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuin
   return vminu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vminu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1514,7 +1363,6 @@ vuint16m1_t test_vminu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16
   return vminu_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vminu.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1524,7 +1372,6 @@ vuint16m1_t test_vminu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16
   return vminu_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vminu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1534,7 +1381,6 @@ vuint16m2_t test_vminu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m
   return vminu_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vminu.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1544,7 +1390,6 @@ vuint16m2_t test_vminu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m
   return vminu_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vminu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1554,7 +1399,6 @@ vuint16m4_t test_vminu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m
   return vminu_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vminu.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1564,7 +1408,6 @@ vuint16m4_t test_vminu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m
   return vminu_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vminu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1574,7 +1417,6 @@ vuint16m8_t test_vminu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m
   return vminu_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vminu.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1584,7 +1426,6 @@ vuint16m8_t test_vminu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m
   return vminu_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1594,7 +1435,6 @@ vuint32mf2_t test_vminu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuin
   return vminu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1604,7 +1444,6 @@ vuint32mf2_t test_vminu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuin
   return vminu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vminu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1614,7 +1453,6 @@ vuint32m1_t test_vminu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32
   return vminu_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vminu.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1624,7 +1462,6 @@ vuint32m1_t test_vminu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32
   return vminu_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vminu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1634,7 +1471,6 @@ vuint32m2_t test_vminu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32
   return vminu_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vminu.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1644,7 +1480,6 @@ vuint32m2_t test_vminu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32
   return vminu_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vminu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1654,7 +1489,6 @@ vuint32m4_t test_vminu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m
   return vminu_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vminu.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1664,7 +1498,6 @@ vuint32m4_t test_vminu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m
   return vminu_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vminu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1674,7 +1507,6 @@ vuint32m8_t test_vminu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m
   return vminu_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vminu.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1684,7 +1516,6 @@ vuint32m8_t test_vminu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m
   return vminu_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vminu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1694,7 +1525,6 @@ vuint64m1_t test_vminu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64
   return vminu_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vminu.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1704,7 +1534,6 @@ vuint64m1_t test_vminu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64
   return vminu_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vminu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1714,7 +1543,6 @@ vuint64m2_t test_vminu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64
   return vminu_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vminu.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1724,7 +1552,6 @@ vuint64m2_t test_vminu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64
   return vminu_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vminu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1734,7 +1561,6 @@ vuint64m4_t test_vminu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64
   return vminu_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vminu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1744,7 +1570,6 @@ vuint64m4_t test_vminu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64
   return vminu_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vminu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1754,7 +1579,6 @@ vuint64m8_t test_vminu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m
   return vminu_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vminu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmmv.c
index 2a191a392f671..68b01c5829754 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmmv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmmv.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmmv_m_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmand.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP1]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vbool1_t test_vmmv_m_b1 (vbool1_t op1, size_t vl) {
   return vmmv_m_b1(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmmv_m_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmand.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP1]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vbool2_t test_vmmv_m_b2 (vbool2_t op1, size_t vl) {
   return vmmv_m_b2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmmv_m_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmand.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP1]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vbool4_t test_vmmv_m_b4 (vbool4_t op1, size_t vl) {
   return vmmv_m_b4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmmv_m_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmand.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP1]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vbool8_t test_vmmv_m_b8 (vbool8_t op1, size_t vl) {
   return vmmv_m_b8(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmmv_m_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP1]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vbool16_t test_vmmv_m_b16 (vbool16_t op1, size_t vl) {
   return vmmv_m_b16(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmmv_m_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmand.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP1]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vbool32_t test_vmmv_m_b32 (vbool32_t op1, size_t vl) {
   return vmmv_m_b32(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmmv_m_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP1]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmnand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmnand.c
index ec3563a5929c0..67429dd14cf58 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmnand.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmnand.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmnand_mm_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmnand.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vbool1_t test_vmnand_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
   return vmnand_mm_b1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmnand_mm_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmnand.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vbool2_t test_vmnand_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
   return vmnand_mm_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmnand_mm_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmnand.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vbool4_t test_vmnand_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
   return vmnand_mm_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmnand_mm_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmnand.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vbool8_t test_vmnand_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
   return vmnand_mm_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmnand_mm_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmnand.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vbool16_t test_vmnand_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
   return vmnand_mm_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmnand_mm_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmnand.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vbool32_t test_vmnand_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
   return vmnand_mm_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmnand_mm_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmnand.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmnor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmnor.c
index 43db419244ab6..2cbad2dd5cce9 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmnor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmnor.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmnor_mm_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmnor.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vbool1_t test_vmnor_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
   return vmnor_mm_b1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmnor_mm_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmnor.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vbool2_t test_vmnor_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
   return vmnor_mm_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmnor_mm_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmnor.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vbool4_t test_vmnor_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
   return vmnor_mm_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmnor_mm_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmnor.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vbool8_t test_vmnor_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
   return vmnor_mm_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmnor_mm_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmnor.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vbool16_t test_vmnor_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
   return vmnor_mm_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmnor_mm_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmnor.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vbool32_t test_vmnor_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
   return vmnor_mm_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmnor_mm_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmnor.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmnot.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmnot.c
index 0e91b93b7ea76..4e4456400139a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmnot.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmnot.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmnot_m_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmnand.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP1]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vbool1_t test_vmnot_m_b1 (vbool1_t op1, size_t vl) {
   return vmnot_m_b1(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmnot_m_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmnand.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP1]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vbool2_t test_vmnot_m_b2 (vbool2_t op1, size_t vl) {
   return vmnot_m_b2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmnot_m_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmnand.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP1]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vbool4_t test_vmnot_m_b4 (vbool4_t op1, size_t vl) {
   return vmnot_m_b4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmnot_m_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmnand.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP1]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vbool8_t test_vmnot_m_b8 (vbool8_t op1, size_t vl) {
   return vmnot_m_b8(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmnot_m_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmnand.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP1]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vbool16_t test_vmnot_m_b16 (vbool16_t op1, size_t vl) {
   return vmnot_m_b16(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmnot_m_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmnand.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP1]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vbool32_t test_vmnot_m_b32 (vbool32_t op1, size_t vl) {
   return vmnot_m_b32(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmnot_m_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmnand.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP1]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmor.c
index 50990289ea18c..5403ad16f9075 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmor.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmor_mm_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmor.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vbool1_t test_vmor_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
   return vmor_mm_b1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmor_mm_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmor.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vbool2_t test_vmor_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
   return vmor_mm_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmor_mm_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmor.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vbool4_t test_vmor_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
   return vmor_mm_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmor_mm_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmor.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vbool8_t test_vmor_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
   return vmor_mm_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmor_mm_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmor.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vbool16_t test_vmor_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
   return vmor_mm_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmor_mm_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmor.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vbool32_t test_vmor_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
   return vmor_mm_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmor_mm_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmor.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vbool64_t test_vmor_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
   return vmor_mm_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmornot_mm_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmornot.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vbool1_t test_vmornot_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
   return vmornot_mm_b1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmornot_mm_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmornot.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vbool2_t test_vmornot_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
   return vmornot_mm_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmornot_mm_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmornot.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vbool4_t test_vmornot_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
   return vmornot_mm_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmornot_mm_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmornot.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vbool8_t test_vmornot_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
   return vmornot_mm_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmornot_mm_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmornot.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vbool16_t test_vmornot_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
   return vmornot_mm_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmornot_mm_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmornot.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vbool32_t test_vmornot_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
   return vmornot_mm_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmornot_mm_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmornot.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsbc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsbc.c
index 183e7f3f81414..5f4947ae98b32 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsbc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsbc.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_i8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vbool64_t test_vmsbc_vvm_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2,
   return vmsbc_vvm_i8mf8_b64(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_i8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -26,7 +24,6 @@ vbool64_t test_vmsbc_vxm_i8mf8_b64(vint8mf8_t op1, int8_t op2,
   return vmsbc_vxm_i8mf8_b64(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_i8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -36,7 +33,6 @@ vbool64_t test_vmsbc_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
   return vmsbc_vv_i8mf8_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_i8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -46,7 +42,6 @@ vbool64_t test_vmsbc_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) {
   return vmsbc_vx_i8mf8_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_i8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -57,7 +52,6 @@ vbool32_t test_vmsbc_vvm_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2,
   return vmsbc_vvm_i8mf4_b32(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_i8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -68,7 +62,6 @@ vbool32_t test_vmsbc_vxm_i8mf4_b32(vint8mf4_t op1, int8_t op2,
   return vmsbc_vxm_i8mf4_b32(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_i8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -78,7 +71,6 @@ vbool32_t test_vmsbc_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
   return vmsbc_vv_i8mf4_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_i8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -88,7 +80,6 @@ vbool32_t test_vmsbc_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) {
   return vmsbc_vx_i8mf4_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_i8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -99,7 +90,6 @@ vbool16_t test_vmsbc_vvm_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2,
   return vmsbc_vvm_i8mf2_b16(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_i8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -110,7 +100,6 @@ vbool16_t test_vmsbc_vxm_i8mf2_b16(vint8mf2_t op1, int8_t op2,
   return vmsbc_vxm_i8mf2_b16(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_i8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -120,7 +109,6 @@ vbool16_t test_vmsbc_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
   return vmsbc_vv_i8mf2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_i8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -130,7 +118,6 @@ vbool16_t test_vmsbc_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) {
   return vmsbc_vx_i8mf2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_i8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -141,7 +128,6 @@ vbool8_t test_vmsbc_vvm_i8m1_b8(vint8m1_t op1, vint8m1_t op2, vbool8_t borrowin,
   return vmsbc_vvm_i8m1_b8(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_i8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -152,7 +138,6 @@ vbool8_t test_vmsbc_vxm_i8m1_b8(vint8m1_t op1, int8_t op2, vbool8_t borrowin,
   return vmsbc_vxm_i8m1_b8(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_i8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -162,7 +147,6 @@ vbool8_t test_vmsbc_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) {
   return vmsbc_vv_i8m1_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_i8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -172,7 +156,6 @@ vbool8_t test_vmsbc_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) {
   return vmsbc_vx_i8m1_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_i8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -183,7 +166,6 @@ vbool4_t test_vmsbc_vvm_i8m2_b4(vint8m2_t op1, vint8m2_t op2, vbool4_t borrowin,
   return vmsbc_vvm_i8m2_b4(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_i8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -194,7 +176,6 @@ vbool4_t test_vmsbc_vxm_i8m2_b4(vint8m2_t op1, int8_t op2, vbool4_t borrowin,
   return vmsbc_vxm_i8m2_b4(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_i8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -204,7 +185,6 @@ vbool4_t test_vmsbc_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
   return vmsbc_vv_i8m2_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_i8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -214,7 +194,6 @@ vbool4_t test_vmsbc_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) {
   return vmsbc_vx_i8m2_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_i8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -225,7 +204,6 @@ vbool2_t test_vmsbc_vvm_i8m4_b2(vint8m4_t op1, vint8m4_t op2, vbool2_t borrowin,
   return vmsbc_vvm_i8m4_b2(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_i8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -236,7 +214,6 @@ vbool2_t test_vmsbc_vxm_i8m4_b2(vint8m4_t op1, int8_t op2, vbool2_t borrowin,
   return vmsbc_vxm_i8m4_b2(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_i8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -246,7 +223,6 @@ vbool2_t test_vmsbc_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) {
   return vmsbc_vv_i8m4_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_i8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -256,7 +232,6 @@ vbool2_t test_vmsbc_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) {
   return vmsbc_vx_i8m4_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_i8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -267,7 +242,6 @@ vbool1_t test_vmsbc_vvm_i8m8_b1(vint8m8_t op1, vint8m8_t op2, vbool1_t borrowin,
   return vmsbc_vvm_i8m8_b1(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_i8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -278,7 +252,6 @@ vbool1_t test_vmsbc_vxm_i8m8_b1(vint8m8_t op1, int8_t op2, vbool1_t borrowin,
   return vmsbc_vxm_i8m8_b1(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_i8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -288,7 +261,6 @@ vbool1_t test_vmsbc_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) {
   return vmsbc_vv_i8m8_b1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_i8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -298,7 +270,6 @@ vbool1_t test_vmsbc_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) {
   return vmsbc_vx_i8m8_b1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_i16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -309,7 +280,6 @@ vbool64_t test_vmsbc_vvm_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2,
   return vmsbc_vvm_i16mf4_b64(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_i16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -320,7 +290,6 @@ vbool64_t test_vmsbc_vxm_i16mf4_b64(vint16mf4_t op1, int16_t op2,
   return vmsbc_vxm_i16mf4_b64(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_i16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -331,7 +300,6 @@ vbool64_t test_vmsbc_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2,
   return vmsbc_vv_i16mf4_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_i16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -341,7 +309,6 @@ vbool64_t test_vmsbc_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) {
   return vmsbc_vx_i16mf4_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_i16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -352,7 +319,6 @@ vbool32_t test_vmsbc_vvm_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2,
   return vmsbc_vvm_i16mf2_b32(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_i16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -363,7 +329,6 @@ vbool32_t test_vmsbc_vxm_i16mf2_b32(vint16mf2_t op1, int16_t op2,
   return vmsbc_vxm_i16mf2_b32(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_i16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -374,7 +339,6 @@ vbool32_t test_vmsbc_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2,
   return vmsbc_vv_i16mf2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_i16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -384,7 +348,6 @@ vbool32_t test_vmsbc_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) {
   return vmsbc_vx_i16mf2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_i16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -395,7 +358,6 @@ vbool16_t test_vmsbc_vvm_i16m1_b16(vint16m1_t op1, vint16m1_t op2,
   return vmsbc_vvm_i16m1_b16(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_i16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -406,7 +368,6 @@ vbool16_t test_vmsbc_vxm_i16m1_b16(vint16m1_t op1, int16_t op2,
   return vmsbc_vxm_i16m1_b16(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_i16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -416,7 +377,6 @@ vbool16_t test_vmsbc_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) {
   return vmsbc_vv_i16m1_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_i16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -426,7 +386,6 @@ vbool16_t test_vmsbc_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) {
   return vmsbc_vx_i16m1_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_i16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -437,7 +396,6 @@ vbool8_t test_vmsbc_vvm_i16m2_b8(vint16m2_t op1, vint16m2_t op2,
   return vmsbc_vvm_i16m2_b8(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_i16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -448,7 +406,6 @@ vbool8_t test_vmsbc_vxm_i16m2_b8(vint16m2_t op1, int16_t op2, vbool8_t borrowin,
   return vmsbc_vxm_i16m2_b8(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_i16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -458,7 +415,6 @@ vbool8_t test_vmsbc_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) {
   return vmsbc_vv_i16m2_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_i16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -468,7 +424,6 @@ vbool8_t test_vmsbc_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) {
   return vmsbc_vx_i16m2_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_i16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -479,7 +434,6 @@ vbool4_t test_vmsbc_vvm_i16m4_b4(vint16m4_t op1, vint16m4_t op2,
   return vmsbc_vvm_i16m4_b4(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_i16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -490,7 +444,6 @@ vbool4_t test_vmsbc_vxm_i16m4_b4(vint16m4_t op1, int16_t op2, vbool4_t borrowin,
   return vmsbc_vxm_i16m4_b4(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_i16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -500,7 +453,6 @@ vbool4_t test_vmsbc_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
   return vmsbc_vv_i16m4_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_i16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -510,7 +462,6 @@ vbool4_t test_vmsbc_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) {
   return vmsbc_vx_i16m4_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_i16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -521,7 +472,6 @@ vbool2_t test_vmsbc_vvm_i16m8_b2(vint16m8_t op1, vint16m8_t op2,
   return vmsbc_vvm_i16m8_b2(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_i16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -532,7 +482,6 @@ vbool2_t test_vmsbc_vxm_i16m8_b2(vint16m8_t op1, int16_t op2, vbool2_t borrowin,
   return vmsbc_vxm_i16m8_b2(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_i16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -542,7 +491,6 @@ vbool2_t test_vmsbc_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) {
   return vmsbc_vv_i16m8_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_i16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -552,7 +500,6 @@ vbool2_t test_vmsbc_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) {
   return vmsbc_vx_i16m8_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_i32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -563,7 +510,6 @@ vbool64_t test_vmsbc_vvm_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2,
   return vmsbc_vvm_i32mf2_b64(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_i32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -574,7 +520,6 @@ vbool64_t test_vmsbc_vxm_i32mf2_b64(vint32mf2_t op1, int32_t op2,
   return vmsbc_vxm_i32mf2_b64(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_i32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -585,7 +530,6 @@ vbool64_t test_vmsbc_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2,
   return vmsbc_vv_i32mf2_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_i32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -595,7 +539,6 @@ vbool64_t test_vmsbc_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) {
   return vmsbc_vx_i32mf2_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_i32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -606,7 +549,6 @@ vbool32_t test_vmsbc_vvm_i32m1_b32(vint32m1_t op1, vint32m1_t op2,
   return vmsbc_vvm_i32m1_b32(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_i32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -617,7 +559,6 @@ vbool32_t test_vmsbc_vxm_i32m1_b32(vint32m1_t op1, int32_t op2,
   return vmsbc_vxm_i32m1_b32(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_i32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -627,7 +568,6 @@ vbool32_t test_vmsbc_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) {
   return vmsbc_vv_i32m1_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_i32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -637,7 +577,6 @@ vbool32_t test_vmsbc_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) {
   return vmsbc_vx_i32m1_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_i32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -648,7 +587,6 @@ vbool16_t test_vmsbc_vvm_i32m2_b16(vint32m2_t op1, vint32m2_t op2,
   return vmsbc_vvm_i32m2_b16(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_i32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -659,7 +597,6 @@ vbool16_t test_vmsbc_vxm_i32m2_b16(vint32m2_t op1, int32_t op2,
   return vmsbc_vxm_i32m2_b16(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_i32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -669,7 +606,6 @@ vbool16_t test_vmsbc_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) {
   return vmsbc_vv_i32m2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_i32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -679,7 +615,6 @@ vbool16_t test_vmsbc_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) {
   return vmsbc_vx_i32m2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_i32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -690,7 +625,6 @@ vbool8_t test_vmsbc_vvm_i32m4_b8(vint32m4_t op1, vint32m4_t op2,
   return vmsbc_vvm_i32m4_b8(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_i32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -701,7 +635,6 @@ vbool8_t test_vmsbc_vxm_i32m4_b8(vint32m4_t op1, int32_t op2, vbool8_t borrowin,
   return vmsbc_vxm_i32m4_b8(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_i32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -711,7 +644,6 @@ vbool8_t test_vmsbc_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
   return vmsbc_vv_i32m4_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_i32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -721,7 +653,6 @@ vbool8_t test_vmsbc_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) {
   return vmsbc_vx_i32m4_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_i32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -732,7 +663,6 @@ vbool4_t test_vmsbc_vvm_i32m8_b4(vint32m8_t op1, vint32m8_t op2,
   return vmsbc_vvm_i32m8_b4(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_i32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -743,7 +673,6 @@ vbool4_t test_vmsbc_vxm_i32m8_b4(vint32m8_t op1, int32_t op2, vbool4_t borrowin,
   return vmsbc_vxm_i32m8_b4(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_i32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -753,7 +682,6 @@ vbool4_t test_vmsbc_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) {
   return vmsbc_vv_i32m8_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_i32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -763,7 +691,6 @@ vbool4_t test_vmsbc_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) {
   return vmsbc_vx_i32m8_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_i64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -774,7 +701,6 @@ vbool64_t test_vmsbc_vvm_i64m1_b64(vint64m1_t op1, vint64m1_t op2,
   return vmsbc_vvm_i64m1_b64(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_i64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -785,7 +711,6 @@ vbool64_t test_vmsbc_vxm_i64m1_b64(vint64m1_t op1, int64_t op2,
   return vmsbc_vxm_i64m1_b64(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_i64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -795,7 +720,6 @@ vbool64_t test_vmsbc_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) {
   return vmsbc_vv_i64m1_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_i64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -805,7 +729,6 @@ vbool64_t test_vmsbc_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) {
   return vmsbc_vx_i64m1_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_i64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -816,7 +739,6 @@ vbool32_t test_vmsbc_vvm_i64m2_b32(vint64m2_t op1, vint64m2_t op2,
   return vmsbc_vvm_i64m2_b32(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_i64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -827,7 +749,6 @@ vbool32_t test_vmsbc_vxm_i64m2_b32(vint64m2_t op1, int64_t op2,
   return vmsbc_vxm_i64m2_b32(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_i64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -837,7 +758,6 @@ vbool32_t test_vmsbc_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) {
   return vmsbc_vv_i64m2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_i64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -847,7 +767,6 @@ vbool32_t test_vmsbc_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) {
   return vmsbc_vx_i64m2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_i64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -858,7 +777,6 @@ vbool16_t test_vmsbc_vvm_i64m4_b16(vint64m4_t op1, vint64m4_t op2,
   return vmsbc_vvm_i64m4_b16(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_i64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -869,7 +787,6 @@ vbool16_t test_vmsbc_vxm_i64m4_b16(vint64m4_t op1, int64_t op2,
   return vmsbc_vxm_i64m4_b16(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_i64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -879,7 +796,6 @@ vbool16_t test_vmsbc_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) {
   return vmsbc_vv_i64m4_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_i64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -889,7 +805,6 @@ vbool16_t test_vmsbc_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) {
   return vmsbc_vx_i64m4_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_i64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -900,7 +815,6 @@ vbool8_t test_vmsbc_vvm_i64m8_b8(vint64m8_t op1, vint64m8_t op2,
   return vmsbc_vvm_i64m8_b8(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_i64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -911,7 +825,6 @@ vbool8_t test_vmsbc_vxm_i64m8_b8(vint64m8_t op1, int64_t op2, vbool8_t borrowin,
   return vmsbc_vxm_i64m8_b8(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_i64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -921,7 +834,6 @@ vbool8_t test_vmsbc_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
   return vmsbc_vv_i64m8_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_i64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -931,7 +843,6 @@ vbool8_t test_vmsbc_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) {
   return vmsbc_vx_i64m8_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_u8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -942,7 +853,6 @@ vbool64_t test_vmsbc_vvm_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2,
   return vmsbc_vvm_u8mf8_b64(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_u8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -953,7 +863,6 @@ vbool64_t test_vmsbc_vxm_u8mf8_b64(vuint8mf8_t op1, uint8_t op2,
   return vmsbc_vxm_u8mf8_b64(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_u8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -963,7 +872,6 @@ vbool64_t test_vmsbc_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
   return vmsbc_vv_u8mf8_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_u8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -973,7 +881,6 @@ vbool64_t test_vmsbc_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) {
   return vmsbc_vx_u8mf8_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_u8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -984,7 +891,6 @@ vbool32_t test_vmsbc_vvm_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2,
   return vmsbc_vvm_u8mf4_b32(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_u8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -995,7 +901,6 @@ vbool32_t test_vmsbc_vxm_u8mf4_b32(vuint8mf4_t op1, uint8_t op2,
   return vmsbc_vxm_u8mf4_b32(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_u8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1005,7 +910,6 @@ vbool32_t test_vmsbc_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
   return vmsbc_vv_u8mf4_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_u8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1015,7 +919,6 @@ vbool32_t test_vmsbc_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) {
   return vmsbc_vx_u8mf4_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_u8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1026,7 +929,6 @@ vbool16_t test_vmsbc_vvm_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2,
   return vmsbc_vvm_u8mf2_b16(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_u8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1037,7 +939,6 @@ vbool16_t test_vmsbc_vxm_u8mf2_b16(vuint8mf2_t op1, uint8_t op2,
   return vmsbc_vxm_u8mf2_b16(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_u8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1047,7 +948,6 @@ vbool16_t test_vmsbc_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
   return vmsbc_vv_u8mf2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_u8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1057,7 +957,6 @@ vbool16_t test_vmsbc_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) {
   return vmsbc_vx_u8mf2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_u8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1068,7 +967,6 @@ vbool8_t test_vmsbc_vvm_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2,
   return vmsbc_vvm_u8m1_b8(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_u8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1079,7 +977,6 @@ vbool8_t test_vmsbc_vxm_u8m1_b8(vuint8m1_t op1, uint8_t op2, vbool8_t borrowin,
   return vmsbc_vxm_u8m1_b8(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_u8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1089,7 +986,6 @@ vbool8_t test_vmsbc_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vmsbc_vv_u8m1_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_u8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1099,7 +995,6 @@ vbool8_t test_vmsbc_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) {
   return vmsbc_vx_u8m1_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_u8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1110,7 +1005,6 @@ vbool4_t test_vmsbc_vvm_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2,
   return vmsbc_vvm_u8m2_b4(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_u8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1121,7 +1015,6 @@ vbool4_t test_vmsbc_vxm_u8m2_b4(vuint8m2_t op1, uint8_t op2, vbool4_t borrowin,
   return vmsbc_vxm_u8m2_b4(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_u8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1131,7 +1024,6 @@ vbool4_t test_vmsbc_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vmsbc_vv_u8m2_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_u8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1141,7 +1033,6 @@ vbool4_t test_vmsbc_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) {
   return vmsbc_vx_u8m2_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_u8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1152,7 +1043,6 @@ vbool2_t test_vmsbc_vvm_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2,
   return vmsbc_vvm_u8m4_b2(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_u8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1163,7 +1053,6 @@ vbool2_t test_vmsbc_vxm_u8m4_b2(vuint8m4_t op1, uint8_t op2, vbool2_t borrowin,
   return vmsbc_vxm_u8m4_b2(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_u8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1173,7 +1062,6 @@ vbool2_t test_vmsbc_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vmsbc_vv_u8m4_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_u8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1183,7 +1071,6 @@ vbool2_t test_vmsbc_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) {
   return vmsbc_vx_u8m4_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_u8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1194,7 +1081,6 @@ vbool1_t test_vmsbc_vvm_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2,
   return vmsbc_vvm_u8m8_b1(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_u8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1205,7 +1091,6 @@ vbool1_t test_vmsbc_vxm_u8m8_b1(vuint8m8_t op1, uint8_t op2, vbool1_t borrowin,
   return vmsbc_vxm_u8m8_b1(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_u8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1215,7 +1100,6 @@ vbool1_t test_vmsbc_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
   return vmsbc_vv_u8m8_b1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_u8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1225,7 +1109,6 @@ vbool1_t test_vmsbc_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) {
   return vmsbc_vx_u8m8_b1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_u16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1236,7 +1119,6 @@ vbool64_t test_vmsbc_vvm_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2,
   return vmsbc_vvm_u16mf4_b64(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_u16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1247,7 +1129,6 @@ vbool64_t test_vmsbc_vxm_u16mf4_b64(vuint16mf4_t op1, uint16_t op2,
   return vmsbc_vxm_u16mf4_b64(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_u16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1258,7 +1139,6 @@ vbool64_t test_vmsbc_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2,
   return vmsbc_vv_u16mf4_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_u16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1268,7 +1148,6 @@ vbool64_t test_vmsbc_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) {
   return vmsbc_vx_u16mf4_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_u16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1279,7 +1158,6 @@ vbool32_t test_vmsbc_vvm_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2,
   return vmsbc_vvm_u16mf2_b32(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_u16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1290,7 +1168,6 @@ vbool32_t test_vmsbc_vxm_u16mf2_b32(vuint16mf2_t op1, uint16_t op2,
   return vmsbc_vxm_u16mf2_b32(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_u16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1301,7 +1178,6 @@ vbool32_t test_vmsbc_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2,
   return vmsbc_vv_u16mf2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_u16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1311,7 +1187,6 @@ vbool32_t test_vmsbc_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) {
   return vmsbc_vx_u16mf2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_u16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1322,7 +1197,6 @@ vbool16_t test_vmsbc_vvm_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2,
   return vmsbc_vvm_u16m1_b16(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_u16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1333,7 +1207,6 @@ vbool16_t test_vmsbc_vxm_u16m1_b16(vuint16m1_t op1, uint16_t op2,
   return vmsbc_vxm_u16m1_b16(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_u16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1343,7 +1216,6 @@ vbool16_t test_vmsbc_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
   return vmsbc_vv_u16m1_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_u16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1353,7 +1225,6 @@ vbool16_t test_vmsbc_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) {
   return vmsbc_vx_u16m1_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_u16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1364,7 +1235,6 @@ vbool8_t test_vmsbc_vvm_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2,
   return vmsbc_vvm_u16m2_b8(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_u16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1375,7 +1245,6 @@ vbool8_t test_vmsbc_vxm_u16m2_b8(vuint16m2_t op1, uint16_t op2,
   return vmsbc_vxm_u16m2_b8(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_u16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1385,7 +1254,6 @@ vbool8_t test_vmsbc_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vmsbc_vv_u16m2_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_u16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1395,7 +1263,6 @@ vbool8_t test_vmsbc_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) {
   return vmsbc_vx_u16m2_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_u16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1406,7 +1273,6 @@ vbool4_t test_vmsbc_vvm_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2,
   return vmsbc_vvm_u16m4_b4(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_u16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1417,7 +1283,6 @@ vbool4_t test_vmsbc_vxm_u16m4_b4(vuint16m4_t op1, uint16_t op2,
   return vmsbc_vxm_u16m4_b4(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_u16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1427,7 +1292,6 @@ vbool4_t test_vmsbc_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vmsbc_vv_u16m4_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_u16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1437,7 +1301,6 @@ vbool4_t test_vmsbc_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) {
   return vmsbc_vx_u16m4_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_u16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1448,7 +1311,6 @@ vbool2_t test_vmsbc_vvm_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2,
   return vmsbc_vvm_u16m8_b2(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_u16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1459,7 +1321,6 @@ vbool2_t test_vmsbc_vxm_u16m8_b2(vuint16m8_t op1, uint16_t op2,
   return vmsbc_vxm_u16m8_b2(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_u16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1469,7 +1330,6 @@ vbool2_t test_vmsbc_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return vmsbc_vv_u16m8_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_u16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1479,7 +1339,6 @@ vbool2_t test_vmsbc_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) {
   return vmsbc_vx_u16m8_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_u32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1490,7 +1349,6 @@ vbool64_t test_vmsbc_vvm_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2,
   return vmsbc_vvm_u32mf2_b64(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_u32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1501,7 +1359,6 @@ vbool64_t test_vmsbc_vxm_u32mf2_b64(vuint32mf2_t op1, uint32_t op2,
   return vmsbc_vxm_u32mf2_b64(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_u32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1512,7 +1369,6 @@ vbool64_t test_vmsbc_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2,
   return vmsbc_vv_u32mf2_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_u32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1522,7 +1378,6 @@ vbool64_t test_vmsbc_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) {
   return vmsbc_vx_u32mf2_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_u32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1533,7 +1388,6 @@ vbool32_t test_vmsbc_vvm_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2,
   return vmsbc_vvm_u32m1_b32(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_u32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1544,7 +1398,6 @@ vbool32_t test_vmsbc_vxm_u32m1_b32(vuint32m1_t op1, uint32_t op2,
   return vmsbc_vxm_u32m1_b32(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_u32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1554,7 +1407,6 @@ vbool32_t test_vmsbc_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
   return vmsbc_vv_u32m1_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_u32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1564,7 +1416,6 @@ vbool32_t test_vmsbc_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) {
   return vmsbc_vx_u32m1_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_u32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1575,7 +1426,6 @@ vbool16_t test_vmsbc_vvm_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2,
   return vmsbc_vvm_u32m2_b16(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_u32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1586,7 +1436,6 @@ vbool16_t test_vmsbc_vxm_u32m2_b16(vuint32m2_t op1, uint32_t op2,
   return vmsbc_vxm_u32m2_b16(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_u32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1596,7 +1445,6 @@ vbool16_t test_vmsbc_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
   return vmsbc_vv_u32m2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_u32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1606,7 +1454,6 @@ vbool16_t test_vmsbc_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vmsbc_vx_u32m2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_u32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1617,7 +1464,6 @@ vbool8_t test_vmsbc_vvm_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2,
   return vmsbc_vvm_u32m4_b8(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_u32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1628,7 +1474,6 @@ vbool8_t test_vmsbc_vxm_u32m4_b8(vuint32m4_t op1, uint32_t op2,
   return vmsbc_vxm_u32m4_b8(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_u32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1638,7 +1483,6 @@ vbool8_t test_vmsbc_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vmsbc_vv_u32m4_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_u32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1648,7 +1492,6 @@ vbool8_t test_vmsbc_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vmsbc_vx_u32m4_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_u32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1659,7 +1502,6 @@ vbool4_t test_vmsbc_vvm_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2,
   return vmsbc_vvm_u32m8_b4(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_u32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1670,7 +1512,6 @@ vbool4_t test_vmsbc_vxm_u32m8_b4(vuint32m8_t op1, uint32_t op2,
   return vmsbc_vxm_u32m8_b4(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_u32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1680,7 +1521,6 @@ vbool4_t test_vmsbc_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
   return vmsbc_vv_u32m8_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_u32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1690,7 +1530,6 @@ vbool4_t test_vmsbc_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) {
   return vmsbc_vx_u32m8_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_u64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1701,7 +1540,6 @@ vbool64_t test_vmsbc_vvm_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2,
   return vmsbc_vvm_u64m1_b64(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_u64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1712,7 +1550,6 @@ vbool64_t test_vmsbc_vxm_u64m1_b64(vuint64m1_t op1, uint64_t op2,
   return vmsbc_vxm_u64m1_b64(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_u64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1722,7 +1559,6 @@ vbool64_t test_vmsbc_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
   return vmsbc_vv_u64m1_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_u64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1732,7 +1568,6 @@ vbool64_t test_vmsbc_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) {
   return vmsbc_vx_u64m1_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_u64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1743,7 +1578,6 @@ vbool32_t test_vmsbc_vvm_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2,
   return vmsbc_vvm_u64m2_b32(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_u64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1754,7 +1588,6 @@ vbool32_t test_vmsbc_vxm_u64m2_b32(vuint64m2_t op1, uint64_t op2,
   return vmsbc_vxm_u64m2_b32(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_u64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1764,7 +1597,6 @@ vbool32_t test_vmsbc_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
   return vmsbc_vv_u64m2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_u64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1774,7 +1606,6 @@ vbool32_t test_vmsbc_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) {
   return vmsbc_vx_u64m2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_u64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1785,7 +1616,6 @@ vbool16_t test_vmsbc_vvm_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2,
   return vmsbc_vvm_u64m4_b16(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_u64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1796,7 +1626,6 @@ vbool16_t test_vmsbc_vxm_u64m4_b16(vuint64m4_t op1, uint64_t op2,
   return vmsbc_vxm_u64m4_b16(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_u64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1806,7 +1635,6 @@ vbool16_t test_vmsbc_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
   return vmsbc_vv_u64m4_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_u64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1816,7 +1644,6 @@ vbool16_t test_vmsbc_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) {
   return vmsbc_vx_u64m4_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_u64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1827,7 +1654,6 @@ vbool8_t test_vmsbc_vvm_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2,
   return vmsbc_vvm_u64m8_b8(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_u64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1838,7 +1664,6 @@ vbool8_t test_vmsbc_vxm_u64m8_b8(vuint64m8_t op1, uint64_t op2,
   return vmsbc_vxm_u64m8_b8(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_u64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1848,7 +1673,6 @@ vbool8_t test_vmsbc_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
   return vmsbc_vv_u64m8_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_u64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsbf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsbf.c
index ba6c750264895..736045c1aa6f7 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsbf.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsbf.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmsbf_m_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbf.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vbool1_t test_vmsbf_m_b1(vbool1_t op1, size_t vl) {
   return vmsbf_m_b1(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbf_m_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbf.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vbool2_t test_vmsbf_m_b2(vbool2_t op1, size_t vl) {
   return vmsbf_m_b2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbf_m_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbf.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vbool4_t test_vmsbf_m_b4(vbool4_t op1, size_t vl) {
   return vmsbf_m_b4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbf_m_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbf.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vbool8_t test_vmsbf_m_b8(vbool8_t op1, size_t vl) {
   return vmsbf_m_b8(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbf_m_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbf.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vbool16_t test_vmsbf_m_b16(vbool16_t op1, size_t vl) {
   return vmsbf_m_b16(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbf_m_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbf.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vbool32_t test_vmsbf_m_b32(vbool32_t op1, size_t vl) {
   return vmsbf_m_b32(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbf_m_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbf.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vbool64_t test_vmsbf_m_b64(vbool64_t op1, size_t vl) {
   return vmsbf_m_b64(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbf_m_b1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbf.mask.nxv64i1.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -85,7 +77,6 @@ vbool1_t test_vmsbf_m_b1_m(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1,
   return vmsbf_m_b1_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbf_m_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbf.mask.nxv32i1.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -96,7 +87,6 @@ vbool2_t test_vmsbf_m_b2_m(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1,
   return vmsbf_m_b2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbf_m_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbf.mask.nxv16i1.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -107,7 +97,6 @@ vbool4_t test_vmsbf_m_b4_m(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1,
   return vmsbf_m_b4_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbf_m_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbf.mask.nxv8i1.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -118,7 +107,6 @@ vbool8_t test_vmsbf_m_b8_m(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1,
   return vmsbf_m_b8_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbf_m_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbf.mask.nxv4i1.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -129,7 +117,6 @@ vbool16_t test_vmsbf_m_b16_m(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1,
   return vmsbf_m_b16_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbf_m_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbf.mask.nxv2i1.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -140,7 +127,6 @@ vbool32_t test_vmsbf_m_b32_m(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1,
   return vmsbf_m_b32_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbf_m_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbf.mask.nxv1i1.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmseq.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmseq.c
index 6d004dbb34174..80d25a68af06e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmseq.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmseq.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vbool64_t test_vmseq_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
   return vmseq_vv_i8mf8_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vbool64_t test_vmseq_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) {
   return vmseq_vx_i8mf8_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vbool32_t test_vmseq_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
   return vmseq_vv_i8mf4_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vbool32_t test_vmseq_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) {
   return vmseq_vx_i8mf4_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vbool16_t test_vmseq_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
   return vmseq_vv_i8mf2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vbool16_t test_vmseq_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) {
   return vmseq_vx_i8mf2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vbool8_t test_vmseq_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) {
   return vmseq_vv_i8m1_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vbool8_t test_vmseq_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) {
   return vmseq_vx_i8m1_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vbool4_t test_vmseq_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
   return vmseq_vv_i8m2_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vbool4_t test_vmseq_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) {
   return vmseq_vx_i8m2_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vbool2_t test_vmseq_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) {
   return vmseq_vv_i8m4_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vbool2_t test_vmseq_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) {
   return vmseq_vx_i8m4_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmseq.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vbool1_t test_vmseq_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) {
   return vmseq_vv_i8m8_b1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmseq.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vbool1_t test_vmseq_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) {
   return vmseq_vx_i8m8_b1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -155,7 +140,6 @@ vbool64_t test_vmseq_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2,
   return vmseq_vv_i16mf4_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -165,7 +149,6 @@ vbool64_t test_vmseq_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) {
   return vmseq_vx_i16mf4_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -176,7 +159,6 @@ vbool32_t test_vmseq_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2,
   return vmseq_vv_i16mf2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -186,7 +168,6 @@ vbool32_t test_vmseq_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) {
   return vmseq_vx_i16mf2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -196,7 +177,6 @@ vbool16_t test_vmseq_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) {
   return vmseq_vv_i16m1_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -206,7 +186,6 @@ vbool16_t test_vmseq_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) {
   return vmseq_vx_i16m1_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -216,7 +195,6 @@ vbool8_t test_vmseq_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) {
   return vmseq_vv_i16m2_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -226,7 +204,6 @@ vbool8_t test_vmseq_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) {
   return vmseq_vx_i16m2_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -236,7 +213,6 @@ vbool4_t test_vmseq_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
   return vmseq_vv_i16m4_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -246,7 +222,6 @@ vbool4_t test_vmseq_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) {
   return vmseq_vx_i16m4_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -256,7 +231,6 @@ vbool2_t test_vmseq_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) {
   return vmseq_vv_i16m8_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -266,7 +240,6 @@ vbool2_t test_vmseq_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) {
   return vmseq_vx_i16m8_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -277,7 +250,6 @@ vbool64_t test_vmseq_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2,
   return vmseq_vv_i32mf2_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -287,7 +259,6 @@ vbool64_t test_vmseq_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) {
   return vmseq_vx_i32mf2_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -297,7 +268,6 @@ vbool32_t test_vmseq_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) {
   return vmseq_vv_i32m1_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -307,7 +277,6 @@ vbool32_t test_vmseq_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) {
   return vmseq_vx_i32m1_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -317,7 +286,6 @@ vbool16_t test_vmseq_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) {
   return vmseq_vv_i32m2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -327,7 +295,6 @@ vbool16_t test_vmseq_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) {
   return vmseq_vx_i32m2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -337,7 +304,6 @@ vbool8_t test_vmseq_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
   return vmseq_vv_i32m4_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -347,7 +313,6 @@ vbool8_t test_vmseq_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) {
   return vmseq_vx_i32m4_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -357,7 +322,6 @@ vbool4_t test_vmseq_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) {
   return vmseq_vv_i32m8_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -367,7 +331,6 @@ vbool4_t test_vmseq_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) {
   return vmseq_vx_i32m8_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -377,7 +340,6 @@ vbool64_t test_vmseq_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) {
   return vmseq_vv_i64m1_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -387,7 +349,6 @@ vbool64_t test_vmseq_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) {
   return vmseq_vx_i64m1_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -397,7 +358,6 @@ vbool32_t test_vmseq_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) {
   return vmseq_vv_i64m2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -407,7 +367,6 @@ vbool32_t test_vmseq_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) {
   return vmseq_vx_i64m2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -417,7 +376,6 @@ vbool16_t test_vmseq_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) {
   return vmseq_vv_i64m4_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -427,7 +385,6 @@ vbool16_t test_vmseq_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) {
   return vmseq_vx_i64m4_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -437,7 +394,6 @@ vbool8_t test_vmseq_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
   return vmseq_vv_i64m8_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -447,7 +403,6 @@ vbool8_t test_vmseq_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) {
   return vmseq_vx_i64m8_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -457,7 +412,6 @@ vbool64_t test_vmseq_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
   return vmseq_vv_u8mf8_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -467,7 +421,6 @@ vbool64_t test_vmseq_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) {
   return vmseq_vx_u8mf8_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -477,7 +430,6 @@ vbool32_t test_vmseq_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
   return vmseq_vv_u8mf4_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -487,7 +439,6 @@ vbool32_t test_vmseq_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) {
   return vmseq_vx_u8mf4_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -497,7 +448,6 @@ vbool16_t test_vmseq_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
   return vmseq_vv_u8mf2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -507,7 +457,6 @@ vbool16_t test_vmseq_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) {
   return vmseq_vx_u8mf2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -517,7 +466,6 @@ vbool8_t test_vmseq_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vmseq_vv_u8m1_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -527,7 +475,6 @@ vbool8_t test_vmseq_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) {
   return vmseq_vx_u8m1_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -537,7 +484,6 @@ vbool4_t test_vmseq_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vmseq_vv_u8m2_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -547,7 +493,6 @@ vbool4_t test_vmseq_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) {
   return vmseq_vx_u8m2_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -557,7 +502,6 @@ vbool2_t test_vmseq_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vmseq_vv_u8m4_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -567,7 +511,6 @@ vbool2_t test_vmseq_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) {
   return vmseq_vx_u8m4_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmseq.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -577,7 +520,6 @@ vbool1_t test_vmseq_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
   return vmseq_vv_u8m8_b1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmseq.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -587,7 +529,6 @@ vbool1_t test_vmseq_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) {
   return vmseq_vx_u8m8_b1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -598,7 +539,6 @@ vbool64_t test_vmseq_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2,
   return vmseq_vv_u16mf4_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -608,7 +548,6 @@ vbool64_t test_vmseq_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) {
   return vmseq_vx_u16mf4_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -619,7 +558,6 @@ vbool32_t test_vmseq_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2,
   return vmseq_vv_u16mf2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -629,7 +567,6 @@ vbool32_t test_vmseq_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) {
   return vmseq_vx_u16mf2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -639,7 +576,6 @@ vbool16_t test_vmseq_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
   return vmseq_vv_u16m1_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -649,7 +585,6 @@ vbool16_t test_vmseq_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) {
   return vmseq_vx_u16m1_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -659,7 +594,6 @@ vbool8_t test_vmseq_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vmseq_vv_u16m2_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -669,7 +603,6 @@ vbool8_t test_vmseq_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) {
   return vmseq_vx_u16m2_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -679,7 +612,6 @@ vbool4_t test_vmseq_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vmseq_vv_u16m4_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -689,7 +621,6 @@ vbool4_t test_vmseq_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) {
   return vmseq_vx_u16m4_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -699,7 +630,6 @@ vbool2_t test_vmseq_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return vmseq_vv_u16m8_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -709,7 +639,6 @@ vbool2_t test_vmseq_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) {
   return vmseq_vx_u16m8_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -720,7 +649,6 @@ vbool64_t test_vmseq_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2,
   return vmseq_vv_u32mf2_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -730,7 +658,6 @@ vbool64_t test_vmseq_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) {
   return vmseq_vx_u32mf2_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -740,7 +667,6 @@ vbool32_t test_vmseq_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
   return vmseq_vv_u32m1_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -750,7 +676,6 @@ vbool32_t test_vmseq_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) {
   return vmseq_vx_u32m1_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -760,7 +685,6 @@ vbool16_t test_vmseq_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
   return vmseq_vv_u32m2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -770,7 +694,6 @@ vbool16_t test_vmseq_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vmseq_vx_u32m2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -780,7 +703,6 @@ vbool8_t test_vmseq_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vmseq_vv_u32m4_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -790,7 +712,6 @@ vbool8_t test_vmseq_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vmseq_vx_u32m4_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -800,7 +721,6 @@ vbool4_t test_vmseq_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
   return vmseq_vv_u32m8_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -810,7 +730,6 @@ vbool4_t test_vmseq_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) {
   return vmseq_vx_u32m8_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -820,7 +739,6 @@ vbool64_t test_vmseq_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
   return vmseq_vv_u64m1_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -830,7 +748,6 @@ vbool64_t test_vmseq_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) {
   return vmseq_vx_u64m1_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -840,7 +757,6 @@ vbool32_t test_vmseq_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
   return vmseq_vv_u64m2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -850,7 +766,6 @@ vbool32_t test_vmseq_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) {
   return vmseq_vx_u64m2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -860,7 +775,6 @@ vbool16_t test_vmseq_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
   return vmseq_vv_u64m4_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -870,7 +784,6 @@ vbool16_t test_vmseq_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) {
   return vmseq_vx_u64m4_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -880,7 +793,6 @@ vbool8_t test_vmseq_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
   return vmseq_vv_u64m8_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -890,7 +802,6 @@ vbool8_t test_vmseq_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) {
   return vmseq_vx_u64m8_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i8mf8_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -901,7 +812,6 @@ vbool64_t test_vmseq_vv_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmseq_vv_i8mf8_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i8mf8_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8.i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -912,7 +822,6 @@ vbool64_t test_vmseq_vx_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmseq_vx_i8mf8_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i8mf4_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -923,7 +832,6 @@ vbool32_t test_vmseq_vv_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmseq_vv_i8mf4_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i8mf4_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8.i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -934,7 +842,6 @@ vbool32_t test_vmseq_vx_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmseq_vx_i8mf4_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i8mf2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -945,7 +852,6 @@ vbool16_t test_vmseq_vv_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmseq_vv_i8mf2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i8mf2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8.i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -956,7 +862,6 @@ vbool16_t test_vmseq_vx_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmseq_vx_i8mf2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i8m1_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -967,7 +872,6 @@ vbool8_t test_vmseq_vv_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmseq_vv_i8m1_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i8m1_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8.i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -978,7 +882,6 @@ vbool8_t test_vmseq_vx_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmseq_vx_i8m1_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i8m2_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -989,7 +892,6 @@ vbool4_t test_vmseq_vv_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmseq_vv_i8m2_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i8m2_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8.i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1000,7 +902,6 @@ vbool4_t test_vmseq_vx_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmseq_vx_i8m2_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i8m4_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1011,7 +912,6 @@ vbool2_t test_vmseq_vv_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmseq_vv_i8m4_b2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i8m4_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8.i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1022,7 +922,6 @@ vbool2_t test_vmseq_vx_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmseq_vx_i8m4_b2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i8m8_b1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmseq.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1033,7 +932,6 @@ vbool1_t test_vmseq_vv_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
   return vmseq_vv_i8m8_b1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i8m8_b1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmseq.mask.nxv64i8.i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1044,7 +942,6 @@ vbool1_t test_vmseq_vx_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
   return vmseq_vx_i8m8_b1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i16mf4_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1056,7 +953,6 @@ vbool64_t test_vmseq_vv_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmseq_vv_i16mf4_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i16mf4_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16.i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1067,7 +963,6 @@ vbool64_t test_vmseq_vx_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmseq_vx_i16mf4_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i16mf2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1079,7 +974,6 @@ vbool32_t test_vmseq_vv_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmseq_vv_i16mf2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i16mf2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16.i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1090,7 +984,6 @@ vbool32_t test_vmseq_vx_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmseq_vx_i16mf2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i16m1_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1101,7 +994,6 @@ vbool16_t test_vmseq_vv_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmseq_vv_i16m1_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i16m1_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16.i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1112,7 +1004,6 @@ vbool16_t test_vmseq_vx_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmseq_vx_i16m1_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i16m2_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1123,7 +1014,6 @@ vbool8_t test_vmseq_vv_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmseq_vv_i16m2_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i16m2_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16.i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1134,7 +1024,6 @@ vbool8_t test_vmseq_vx_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmseq_vx_i16m2_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i16m4_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1145,7 +1034,6 @@ vbool4_t test_vmseq_vv_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmseq_vv_i16m4_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i16m4_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16.i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1156,7 +1044,6 @@ vbool4_t test_vmseq_vx_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmseq_vx_i16m4_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i16m8_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1167,7 +1054,6 @@ vbool2_t test_vmseq_vv_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmseq_vv_i16m8_b2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i16m8_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i16.i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1178,7 +1064,6 @@ vbool2_t test_vmseq_vx_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmseq_vx_i16m8_b2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1190,7 +1075,6 @@ vbool64_t test_vmseq_vv_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmseq_vv_i32mf2_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1201,7 +1085,6 @@ vbool64_t test_vmseq_vx_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmseq_vx_i32mf2_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1212,7 +1095,6 @@ vbool32_t test_vmseq_vv_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmseq_vv_i32m1_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32.i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1223,7 +1105,6 @@ vbool32_t test_vmseq_vx_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmseq_vx_i32m1_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1234,7 +1115,6 @@ vbool16_t test_vmseq_vv_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmseq_vv_i32m2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32.i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1245,7 +1125,6 @@ vbool16_t test_vmseq_vx_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmseq_vx_i32m2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1256,7 +1135,6 @@ vbool8_t test_vmseq_vv_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmseq_vv_i32m4_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32.i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1267,7 +1145,6 @@ vbool8_t test_vmseq_vx_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmseq_vx_i32m4_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1278,7 +1155,6 @@ vbool4_t test_vmseq_vv_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmseq_vv_i32m8_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i32.i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1289,7 +1165,6 @@ vbool4_t test_vmseq_vx_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmseq_vx_i32m8_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1300,7 +1175,6 @@ vbool64_t test_vmseq_vv_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmseq_vv_i64m1_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1311,7 +1185,6 @@ vbool64_t test_vmseq_vx_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmseq_vx_i64m1_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1322,7 +1195,6 @@ vbool32_t test_vmseq_vv_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmseq_vv_i64m2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64.i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1333,7 +1205,6 @@ vbool32_t test_vmseq_vx_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmseq_vx_i64m2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1344,7 +1215,6 @@ vbool16_t test_vmseq_vv_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmseq_vv_i64m4_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64.i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1355,7 +1225,6 @@ vbool16_t test_vmseq_vx_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmseq_vx_i64m4_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1366,7 +1235,6 @@ vbool8_t test_vmseq_vv_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmseq_vv_i64m8_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i64.i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1377,7 +1245,6 @@ vbool8_t test_vmseq_vx_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmseq_vx_i64m8_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u8mf8_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1389,7 +1256,6 @@ vbool64_t test_vmseq_vv_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmseq_vv_u8mf8_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u8mf8_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8.i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1400,7 +1266,6 @@ vbool64_t test_vmseq_vx_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmseq_vx_u8mf8_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u8mf4_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1412,7 +1277,6 @@ vbool32_t test_vmseq_vv_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmseq_vv_u8mf4_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u8mf4_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8.i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1423,7 +1287,6 @@ vbool32_t test_vmseq_vx_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmseq_vx_u8mf4_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u8mf2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1435,7 +1298,6 @@ vbool16_t test_vmseq_vv_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmseq_vv_u8mf2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u8mf2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8.i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1446,7 +1308,6 @@ vbool16_t test_vmseq_vx_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmseq_vx_u8mf2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u8m1_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1457,7 +1318,6 @@ vbool8_t test_vmseq_vv_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmseq_vv_u8m1_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u8m1_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8.i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1468,7 +1328,6 @@ vbool8_t test_vmseq_vx_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmseq_vx_u8m1_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u8m2_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1479,7 +1338,6 @@ vbool4_t test_vmseq_vv_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmseq_vv_u8m2_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u8m2_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8.i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1490,7 +1348,6 @@ vbool4_t test_vmseq_vx_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmseq_vx_u8m2_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u8m4_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1501,7 +1358,6 @@ vbool2_t test_vmseq_vv_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmseq_vv_u8m4_b2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u8m4_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8.i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1512,7 +1368,6 @@ vbool2_t test_vmseq_vx_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmseq_vx_u8m4_b2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u8m8_b1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmseq.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1523,7 +1378,6 @@ vbool1_t test_vmseq_vv_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
   return vmseq_vv_u8m8_b1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u8m8_b1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmseq.mask.nxv64i8.i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1534,7 +1388,6 @@ vbool1_t test_vmseq_vx_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
   return vmseq_vx_u8m8_b1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u16mf4_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1546,7 +1399,6 @@ vbool64_t test_vmseq_vv_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmseq_vv_u16mf4_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u16mf4_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16.i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1558,7 +1410,6 @@ vbool64_t test_vmseq_vx_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmseq_vx_u16mf4_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u16mf2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1570,7 +1421,6 @@ vbool32_t test_vmseq_vv_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmseq_vv_u16mf2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u16mf2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16.i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1582,7 +1432,6 @@ vbool32_t test_vmseq_vx_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmseq_vx_u16mf2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u16m1_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1594,7 +1443,6 @@ vbool16_t test_vmseq_vv_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmseq_vv_u16m1_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u16m1_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16.i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1605,7 +1453,6 @@ vbool16_t test_vmseq_vx_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmseq_vx_u16m1_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u16m2_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1616,7 +1463,6 @@ vbool8_t test_vmseq_vv_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmseq_vv_u16m2_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u16m2_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16.i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1627,7 +1473,6 @@ vbool8_t test_vmseq_vx_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmseq_vx_u16m2_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u16m4_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1638,7 +1483,6 @@ vbool4_t test_vmseq_vv_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmseq_vv_u16m4_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u16m4_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16.i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1649,7 +1493,6 @@ vbool4_t test_vmseq_vx_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmseq_vx_u16m4_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u16m8_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1660,7 +1503,6 @@ vbool2_t test_vmseq_vv_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmseq_vv_u16m8_b2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u16m8_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i16.i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1671,7 +1513,6 @@ vbool2_t test_vmseq_vx_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmseq_vx_u16m8_b2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1683,7 +1524,6 @@ vbool64_t test_vmseq_vv_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmseq_vv_u32mf2_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1695,7 +1535,6 @@ vbool64_t test_vmseq_vx_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmseq_vx_u32mf2_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1707,7 +1546,6 @@ vbool32_t test_vmseq_vv_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmseq_vv_u32m1_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32.i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1718,7 +1556,6 @@ vbool32_t test_vmseq_vx_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmseq_vx_u32m1_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1730,7 +1567,6 @@ vbool16_t test_vmseq_vv_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmseq_vv_u32m2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32.i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1741,7 +1577,6 @@ vbool16_t test_vmseq_vx_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmseq_vx_u32m2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1752,7 +1587,6 @@ vbool8_t test_vmseq_vv_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmseq_vv_u32m4_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32.i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1763,7 +1597,6 @@ vbool8_t test_vmseq_vx_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmseq_vx_u32m4_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1774,7 +1607,6 @@ vbool4_t test_vmseq_vv_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmseq_vv_u32m8_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i32.i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1785,7 +1617,6 @@ vbool4_t test_vmseq_vx_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmseq_vx_u32m8_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1797,7 +1628,6 @@ vbool64_t test_vmseq_vv_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmseq_vv_u64m1_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1808,7 +1638,6 @@ vbool64_t test_vmseq_vx_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmseq_vx_u64m1_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1820,7 +1649,6 @@ vbool32_t test_vmseq_vv_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmseq_vv_u64m2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64.i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1831,7 +1659,6 @@ vbool32_t test_vmseq_vx_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmseq_vx_u64m2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1843,7 +1670,6 @@ vbool16_t test_vmseq_vv_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmseq_vv_u64m4_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64.i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1854,7 +1680,6 @@ vbool16_t test_vmseq_vx_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmseq_vx_u64m4_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1865,7 +1690,6 @@ vbool8_t test_vmseq_vv_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmseq_vv_u64m8_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i64.i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmset.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmset.c
index dd2fadd3c9650..6f2f03940c232 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmset.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmset.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmset_m_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmset.nxv64i1.i64(i64 [[VL:%.*]])
@@ -12,7 +11,6 @@
 //
 vbool1_t test_vmset_m_b1(size_t vl) { return vmset_m_b1(vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vmset_m_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmset.nxv32i1.i64(i64 [[VL:%.*]])
@@ -20,7 +18,6 @@ vbool1_t test_vmset_m_b1(size_t vl) { return vmset_m_b1(vl); }
 //
 vbool2_t test_vmset_m_b2(size_t vl) { return vmset_m_b2(vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vmset_m_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmset.nxv16i1.i64(i64 [[VL:%.*]])
@@ -28,7 +25,6 @@ vbool2_t test_vmset_m_b2(size_t vl) { return vmset_m_b2(vl); }
 //
 vbool4_t test_vmset_m_b4(size_t vl) { return vmset_m_b4(vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vmset_m_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmset.nxv8i1.i64(i64 [[VL:%.*]])
@@ -36,7 +32,6 @@ vbool4_t test_vmset_m_b4(size_t vl) { return vmset_m_b4(vl); }
 //
 vbool8_t test_vmset_m_b8(size_t vl) { return vmset_m_b8(vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vmset_m_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmset.nxv4i1.i64(i64 [[VL:%.*]])
@@ -44,7 +39,6 @@ vbool8_t test_vmset_m_b8(size_t vl) { return vmset_m_b8(vl); }
 //
 vbool16_t test_vmset_m_b16(size_t vl) { return vmset_m_b16(vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vmset_m_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmset.nxv2i1.i64(i64 [[VL:%.*]])
@@ -52,7 +46,6 @@ vbool16_t test_vmset_m_b16(size_t vl) { return vmset_m_b16(vl); }
 //
 vbool32_t test_vmset_m_b32(size_t vl) { return vmset_m_b32(vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vmset_m_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmset.nxv1i1.i64(i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsge.c
index d182c930a15fd..04f1151644ac3 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsge.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsge.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vbool64_t test_vmsge_vv_i8mf8_b64 (vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
   return vmsge_vv_i8mf8_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vbool64_t test_vmsge_vx_i8mf8_b64 (vint8mf8_t op1, int8_t op2, size_t vl) {
   return vmsge_vx_i8mf8_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vbool32_t test_vmsge_vv_i8mf4_b32 (vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
   return vmsge_vv_i8mf4_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vbool32_t test_vmsge_vx_i8mf4_b32 (vint8mf4_t op1, int8_t op2, size_t vl) {
   return vmsge_vx_i8mf4_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vbool16_t test_vmsge_vv_i8mf2_b16 (vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
   return vmsge_vv_i8mf2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vbool16_t test_vmsge_vx_i8mf2_b16 (vint8mf2_t op1, int8_t op2, size_t vl) {
   return vmsge_vx_i8mf2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vbool8_t test_vmsge_vv_i8m1_b8 (vint8m1_t op1, vint8m1_t op2, size_t vl) {
   return vmsge_vv_i8m1_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vbool8_t test_vmsge_vx_i8m1_b8 (vint8m1_t op1, int8_t op2, size_t vl) {
   return vmsge_vx_i8m1_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vbool4_t test_vmsge_vv_i8m2_b4 (vint8m2_t op1, vint8m2_t op2, size_t vl) {
   return vmsge_vv_i8m2_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vbool4_t test_vmsge_vx_i8m2_b4 (vint8m2_t op1, int8_t op2, size_t vl) {
   return vmsge_vx_i8m2_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vbool2_t test_vmsge_vv_i8m4_b2 (vint8m4_t op1, vint8m4_t op2, size_t vl) {
   return vmsge_vv_i8m4_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vbool2_t test_vmsge_vx_i8m4_b2 (vint8m4_t op1, int8_t op2, size_t vl) {
   return vmsge_vx_i8m4_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsge.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vbool1_t test_vmsge_vv_i8m8_b1 (vint8m8_t op1, vint8m8_t op2, size_t vl) {
   return vmsge_vv_i8m8_b1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsge.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vbool1_t test_vmsge_vx_i8m8_b1 (vint8m8_t op1, int8_t op2, size_t vl) {
   return vmsge_vx_i8m8_b1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vbool64_t test_vmsge_vv_i16mf4_b64 (vint16mf4_t op1, vint16mf4_t op2, size_t vl)
   return vmsge_vv_i16mf4_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vbool64_t test_vmsge_vx_i16mf4_b64 (vint16mf4_t op1, int16_t op2, size_t vl) {
   return vmsge_vx_i16mf4_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vbool32_t test_vmsge_vv_i16mf2_b32 (vint16mf2_t op1, vint16mf2_t op2, size_t vl)
   return vmsge_vv_i16mf2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vbool32_t test_vmsge_vx_i16mf2_b32 (vint16mf2_t op1, int16_t op2, size_t vl) {
   return vmsge_vx_i16mf2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vbool16_t test_vmsge_vv_i16m1_b16 (vint16m1_t op1, vint16m1_t op2, size_t vl) {
   return vmsge_vv_i16m1_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vbool16_t test_vmsge_vx_i16m1_b16 (vint16m1_t op1, int16_t op2, size_t vl) {
   return vmsge_vx_i16m1_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vbool8_t test_vmsge_vv_i16m2_b8 (vint16m2_t op1, vint16m2_t op2, size_t vl) {
   return vmsge_vv_i16m2_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vbool8_t test_vmsge_vx_i16m2_b8 (vint16m2_t op1, int16_t op2, size_t vl) {
   return vmsge_vx_i16m2_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vbool4_t test_vmsge_vv_i16m4_b4 (vint16m4_t op1, vint16m4_t op2, size_t vl) {
   return vmsge_vv_i16m4_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vbool4_t test_vmsge_vx_i16m4_b4 (vint16m4_t op1, int16_t op2, size_t vl) {
   return vmsge_vx_i16m4_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vbool2_t test_vmsge_vv_i16m8_b2 (vint16m8_t op1, vint16m8_t op2, size_t vl) {
   return vmsge_vv_i16m8_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vbool2_t test_vmsge_vx_i16m8_b2 (vint16m8_t op1, int16_t op2, size_t vl) {
   return vmsge_vx_i16m8_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vbool64_t test_vmsge_vv_i32mf2_b64 (vint32mf2_t op1, vint32mf2_t op2, size_t vl)
   return vmsge_vv_i32mf2_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vbool64_t test_vmsge_vx_i32mf2_b64 (vint32mf2_t op1, int32_t op2, size_t vl) {
   return vmsge_vx_i32mf2_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vbool32_t test_vmsge_vv_i32m1_b32 (vint32m1_t op1, vint32m1_t op2, size_t vl) {
   return vmsge_vv_i32m1_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -304,7 +274,6 @@ vbool32_t test_vmsge_vx_i32m1_b32 (vint32m1_t op1, int32_t op2, size_t vl) {
   return vmsge_vx_i32m1_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -314,7 +283,6 @@ vbool16_t test_vmsge_vv_i32m2_b16 (vint32m2_t op1, vint32m2_t op2, size_t vl) {
   return vmsge_vv_i32m2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -324,7 +292,6 @@ vbool16_t test_vmsge_vx_i32m2_b16 (vint32m2_t op1, int32_t op2, size_t vl) {
   return vmsge_vx_i32m2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -334,7 +301,6 @@ vbool8_t test_vmsge_vv_i32m4_b8 (vint32m4_t op1, vint32m4_t op2, size_t vl) {
   return vmsge_vv_i32m4_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -344,7 +310,6 @@ vbool8_t test_vmsge_vx_i32m4_b8 (vint32m4_t op1, int32_t op2, size_t vl) {
   return vmsge_vx_i32m4_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -354,7 +319,6 @@ vbool4_t test_vmsge_vv_i32m8_b4 (vint32m8_t op1, vint32m8_t op2, size_t vl) {
   return vmsge_vv_i32m8_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -364,7 +328,6 @@ vbool4_t test_vmsge_vx_i32m8_b4 (vint32m8_t op1, int32_t op2, size_t vl) {
   return vmsge_vx_i32m8_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -374,7 +337,6 @@ vbool64_t test_vmsge_vv_i64m1_b64 (vint64m1_t op1, vint64m1_t op2, size_t vl) {
   return vmsge_vv_i64m1_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -384,7 +346,6 @@ vbool64_t test_vmsge_vx_i64m1_b64 (vint64m1_t op1, int64_t op2, size_t vl) {
   return vmsge_vx_i64m1_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -394,7 +355,6 @@ vbool32_t test_vmsge_vv_i64m2_b32 (vint64m2_t op1, vint64m2_t op2, size_t vl) {
   return vmsge_vv_i64m2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -404,7 +364,6 @@ vbool32_t test_vmsge_vx_i64m2_b32 (vint64m2_t op1, int64_t op2, size_t vl) {
   return vmsge_vx_i64m2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -414,7 +373,6 @@ vbool16_t test_vmsge_vv_i64m4_b16 (vint64m4_t op1, vint64m4_t op2, size_t vl) {
   return vmsge_vv_i64m4_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -424,7 +382,6 @@ vbool16_t test_vmsge_vx_i64m4_b16 (vint64m4_t op1, int64_t op2, size_t vl) {
   return vmsge_vx_i64m4_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -434,7 +391,6 @@ vbool8_t test_vmsge_vv_i64m8_b8 (vint64m8_t op1, vint64m8_t op2, size_t vl) {
   return vmsge_vv_i64m8_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -444,7 +400,6 @@ vbool8_t test_vmsge_vx_i64m8_b8 (vint64m8_t op1, int64_t op2, size_t vl) {
   return vmsge_vx_i64m8_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -454,7 +409,6 @@ vbool64_t test_vmsgeu_vv_u8mf8_b64 (vuint8mf8_t op1, vuint8mf8_t op2, size_t vl)
   return vmsgeu_vv_u8mf8_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -464,7 +418,6 @@ vbool64_t test_vmsgeu_vx_u8mf8_b64 (vuint8mf8_t op1, uint8_t op2, size_t vl) {
   return vmsgeu_vx_u8mf8_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -474,7 +427,6 @@ vbool32_t test_vmsgeu_vv_u8mf4_b32 (vuint8mf4_t op1, vuint8mf4_t op2, size_t vl)
   return vmsgeu_vv_u8mf4_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -484,7 +436,6 @@ vbool32_t test_vmsgeu_vx_u8mf4_b32 (vuint8mf4_t op1, uint8_t op2, size_t vl) {
   return vmsgeu_vx_u8mf4_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -494,7 +445,6 @@ vbool16_t test_vmsgeu_vv_u8mf2_b16 (vuint8mf2_t op1, vuint8mf2_t op2, size_t vl)
   return vmsgeu_vv_u8mf2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -504,7 +454,6 @@ vbool16_t test_vmsgeu_vx_u8mf2_b16 (vuint8mf2_t op1, uint8_t op2, size_t vl) {
   return vmsgeu_vx_u8mf2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -514,7 +463,6 @@ vbool8_t test_vmsgeu_vv_u8m1_b8 (vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vmsgeu_vv_u8m1_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -524,7 +472,6 @@ vbool8_t test_vmsgeu_vx_u8m1_b8 (vuint8m1_t op1, uint8_t op2, size_t vl) {
   return vmsgeu_vx_u8m1_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -534,7 +481,6 @@ vbool4_t test_vmsgeu_vv_u8m2_b4 (vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vmsgeu_vv_u8m2_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -544,7 +490,6 @@ vbool4_t test_vmsgeu_vx_u8m2_b4 (vuint8m2_t op1, uint8_t op2, size_t vl) {
   return vmsgeu_vx_u8m2_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -554,7 +499,6 @@ vbool2_t test_vmsgeu_vv_u8m4_b2 (vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vmsgeu_vv_u8m4_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -564,7 +508,6 @@ vbool2_t test_vmsgeu_vx_u8m4_b2 (vuint8m4_t op1, uint8_t op2, size_t vl) {
   return vmsgeu_vx_u8m4_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsgeu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -574,7 +517,6 @@ vbool1_t test_vmsgeu_vv_u8m8_b1 (vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
   return vmsgeu_vv_u8m8_b1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsgeu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -584,7 +526,6 @@ vbool1_t test_vmsgeu_vx_u8m8_b1 (vuint8m8_t op1, uint8_t op2, size_t vl) {
   return vmsgeu_vx_u8m8_b1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -594,7 +535,6 @@ vbool64_t test_vmsgeu_vv_u16mf4_b64 (vuint16mf4_t op1, vuint16mf4_t op2, size_t
   return vmsgeu_vv_u16mf4_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -604,7 +544,6 @@ vbool64_t test_vmsgeu_vx_u16mf4_b64 (vuint16mf4_t op1, uint16_t op2, size_t vl)
   return vmsgeu_vx_u16mf4_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -614,7 +553,6 @@ vbool32_t test_vmsgeu_vv_u16mf2_b32 (vuint16mf2_t op1, vuint16mf2_t op2, size_t
   return vmsgeu_vv_u16mf2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -624,7 +562,6 @@ vbool32_t test_vmsgeu_vx_u16mf2_b32 (vuint16mf2_t op1, uint16_t op2, size_t vl)
   return vmsgeu_vx_u16mf2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -634,7 +571,6 @@ vbool16_t test_vmsgeu_vv_u16m1_b16 (vuint16m1_t op1, vuint16m1_t op2, size_t vl)
   return vmsgeu_vv_u16m1_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -644,7 +580,6 @@ vbool16_t test_vmsgeu_vx_u16m1_b16 (vuint16m1_t op1, uint16_t op2, size_t vl) {
   return vmsgeu_vx_u16m1_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -654,7 +589,6 @@ vbool8_t test_vmsgeu_vv_u16m2_b8 (vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vmsgeu_vv_u16m2_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -664,7 +598,6 @@ vbool8_t test_vmsgeu_vx_u16m2_b8 (vuint16m2_t op1, uint16_t op2, size_t vl) {
   return vmsgeu_vx_u16m2_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -674,7 +607,6 @@ vbool4_t test_vmsgeu_vv_u16m4_b4 (vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vmsgeu_vv_u16m4_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -684,7 +616,6 @@ vbool4_t test_vmsgeu_vx_u16m4_b4 (vuint16m4_t op1, uint16_t op2, size_t vl) {
   return vmsgeu_vx_u16m4_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -694,7 +625,6 @@ vbool2_t test_vmsgeu_vv_u16m8_b2 (vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return vmsgeu_vv_u16m8_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -704,7 +634,6 @@ vbool2_t test_vmsgeu_vx_u16m8_b2 (vuint16m8_t op1, uint16_t op2, size_t vl) {
   return vmsgeu_vx_u16m8_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -714,7 +643,6 @@ vbool64_t test_vmsgeu_vv_u32mf2_b64 (vuint32mf2_t op1, vuint32mf2_t op2, size_t
   return vmsgeu_vv_u32mf2_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -724,7 +652,6 @@ vbool64_t test_vmsgeu_vx_u32mf2_b64 (vuint32mf2_t op1, uint32_t op2, size_t vl)
   return vmsgeu_vx_u32mf2_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -734,7 +661,6 @@ vbool32_t test_vmsgeu_vv_u32m1_b32 (vuint32m1_t op1, vuint32m1_t op2, size_t vl)
   return vmsgeu_vv_u32m1_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -744,7 +670,6 @@ vbool32_t test_vmsgeu_vx_u32m1_b32 (vuint32m1_t op1, uint32_t op2, size_t vl) {
   return vmsgeu_vx_u32m1_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -754,7 +679,6 @@ vbool16_t test_vmsgeu_vv_u32m2_b16 (vuint32m2_t op1, vuint32m2_t op2, size_t vl)
   return vmsgeu_vv_u32m2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -764,7 +688,6 @@ vbool16_t test_vmsgeu_vx_u32m2_b16 (vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vmsgeu_vx_u32m2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -774,7 +697,6 @@ vbool8_t test_vmsgeu_vv_u32m4_b8 (vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vmsgeu_vv_u32m4_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -784,7 +706,6 @@ vbool8_t test_vmsgeu_vx_u32m4_b8 (vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vmsgeu_vx_u32m4_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -794,7 +715,6 @@ vbool4_t test_vmsgeu_vv_u32m8_b4 (vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
   return vmsgeu_vv_u32m8_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -804,7 +724,6 @@ vbool4_t test_vmsgeu_vx_u32m8_b4 (vuint32m8_t op1, uint32_t op2, size_t vl) {
   return vmsgeu_vx_u32m8_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -814,7 +733,6 @@ vbool64_t test_vmsgeu_vv_u64m1_b64 (vuint64m1_t op1, vuint64m1_t op2, size_t vl)
   return vmsgeu_vv_u64m1_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -824,7 +742,6 @@ vbool64_t test_vmsgeu_vx_u64m1_b64 (vuint64m1_t op1, uint64_t op2, size_t vl) {
   return vmsgeu_vx_u64m1_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -834,7 +751,6 @@ vbool32_t test_vmsgeu_vv_u64m2_b32 (vuint64m2_t op1, vuint64m2_t op2, size_t vl)
   return vmsgeu_vv_u64m2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -844,7 +760,6 @@ vbool32_t test_vmsgeu_vx_u64m2_b32 (vuint64m2_t op1, uint64_t op2, size_t vl) {
   return vmsgeu_vx_u64m2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -854,7 +769,6 @@ vbool16_t test_vmsgeu_vv_u64m4_b16 (vuint64m4_t op1, vuint64m4_t op2, size_t vl)
   return vmsgeu_vv_u64m4_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -864,7 +778,6 @@ vbool16_t test_vmsgeu_vx_u64m4_b16 (vuint64m4_t op1, uint64_t op2, size_t vl) {
   return vmsgeu_vx_u64m4_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -874,7 +787,6 @@ vbool8_t test_vmsgeu_vv_u64m8_b8 (vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
   return vmsgeu_vv_u64m8_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -884,7 +796,6 @@ vbool8_t test_vmsgeu_vx_u64m8_b8 (vuint64m8_t op1, uint64_t op2, size_t vl) {
   return vmsgeu_vx_u64m8_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i8mf8_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -894,7 +805,6 @@ vbool64_t test_vmsge_vv_i8mf8_b64_m (vbool64_t mask, vbool64_t maskedoff, vint8m
   return vmsge_vv_i8mf8_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i8mf8_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8.i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -904,7 +814,6 @@ vbool64_t test_vmsge_vx_i8mf8_b64_m (vbool64_t mask, vbool64_t maskedoff, vint8m
   return vmsge_vx_i8mf8_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i8mf4_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -914,7 +823,6 @@ vbool32_t test_vmsge_vv_i8mf4_b32_m (vbool32_t mask, vbool32_t maskedoff, vint8m
   return vmsge_vv_i8mf4_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i8mf4_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8.i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -924,7 +832,6 @@ vbool32_t test_vmsge_vx_i8mf4_b32_m (vbool32_t mask, vbool32_t maskedoff, vint8m
   return vmsge_vx_i8mf4_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i8mf2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -934,7 +841,6 @@ vbool16_t test_vmsge_vv_i8mf2_b16_m (vbool16_t mask, vbool16_t maskedoff, vint8m
   return vmsge_vv_i8mf2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i8mf2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8.i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -944,7 +850,6 @@ vbool16_t test_vmsge_vx_i8mf2_b16_m (vbool16_t mask, vbool16_t maskedoff, vint8m
   return vmsge_vx_i8mf2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i8m1_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -954,7 +859,6 @@ vbool8_t test_vmsge_vv_i8m1_b8_m (vbool8_t mask, vbool8_t maskedoff, vint8m1_t o
   return vmsge_vv_i8m1_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i8m1_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8.i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -964,7 +868,6 @@ vbool8_t test_vmsge_vx_i8m1_b8_m (vbool8_t mask, vbool8_t maskedoff, vint8m1_t o
   return vmsge_vx_i8m1_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i8m2_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -974,7 +877,6 @@ vbool4_t test_vmsge_vv_i8m2_b4_m (vbool4_t mask, vbool4_t maskedoff, vint8m2_t o
   return vmsge_vv_i8m2_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i8m2_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8.i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -984,7 +886,6 @@ vbool4_t test_vmsge_vx_i8m2_b4_m (vbool4_t mask, vbool4_t maskedoff, vint8m2_t o
   return vmsge_vx_i8m2_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i8m4_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -994,7 +895,6 @@ vbool2_t test_vmsge_vv_i8m4_b2_m (vbool2_t mask, vbool2_t maskedoff, vint8m4_t o
   return vmsge_vv_i8m4_b2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i8m4_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8.i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1004,7 +904,6 @@ vbool2_t test_vmsge_vx_i8m4_b2_m (vbool2_t mask, vbool2_t maskedoff, vint8m4_t o
   return vmsge_vx_i8m4_b2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i8m8_b1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsge.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1014,7 +913,6 @@ vbool1_t test_vmsge_vv_i8m8_b1_m (vbool1_t mask, vbool1_t maskedoff, vint8m8_t o
   return vmsge_vv_i8m8_b1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i8m8_b1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsge.mask.nxv64i8.i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1024,7 +922,6 @@ vbool1_t test_vmsge_vx_i8m8_b1_m (vbool1_t mask, vbool1_t maskedoff, vint8m8_t o
   return vmsge_vx_i8m8_b1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i16mf4_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1034,7 +931,6 @@ vbool64_t test_vmsge_vv_i16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vint1
   return vmsge_vv_i16mf4_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i16mf4_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16.i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1044,7 +940,6 @@ vbool64_t test_vmsge_vx_i16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vint1
   return vmsge_vx_i16mf4_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i16mf2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1054,7 +949,6 @@ vbool32_t test_vmsge_vv_i16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vint1
   return vmsge_vv_i16mf2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i16mf2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16.i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1064,7 +958,6 @@ vbool32_t test_vmsge_vx_i16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vint1
   return vmsge_vx_i16mf2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i16m1_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1074,7 +967,6 @@ vbool16_t test_vmsge_vv_i16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vint16
   return vmsge_vv_i16m1_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i16m1_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16.i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1084,7 +976,6 @@ vbool16_t test_vmsge_vx_i16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vint16
   return vmsge_vx_i16m1_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i16m2_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1094,7 +985,6 @@ vbool8_t test_vmsge_vv_i16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vint16m2_t
   return vmsge_vv_i16m2_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i16m2_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16.i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1104,7 +994,6 @@ vbool8_t test_vmsge_vx_i16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vint16m2_t
   return vmsge_vx_i16m2_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i16m4_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1114,7 +1003,6 @@ vbool4_t test_vmsge_vv_i16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vint16m4_t
   return vmsge_vv_i16m4_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i16m4_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16.i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1124,7 +1012,6 @@ vbool4_t test_vmsge_vx_i16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vint16m4_t
   return vmsge_vx_i16m4_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i16m8_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1134,7 +1021,6 @@ vbool2_t test_vmsge_vv_i16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vint16m8_t
   return vmsge_vv_i16m8_b2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i16m8_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i16.i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1144,7 +1030,6 @@ vbool2_t test_vmsge_vx_i16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vint16m8_t
   return vmsge_vx_i16m8_b2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1154,7 +1039,6 @@ vbool64_t test_vmsge_vv_i32mf2_b64_m (vbool64_t mask, vbool64_t maskedoff, vint3
   return vmsge_vv_i32mf2_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1164,7 +1048,6 @@ vbool64_t test_vmsge_vx_i32mf2_b64_m (vbool64_t mask, vbool64_t maskedoff, vint3
   return vmsge_vx_i32mf2_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1174,7 +1057,6 @@ vbool32_t test_vmsge_vv_i32m1_b32_m (vbool32_t mask, vbool32_t maskedoff, vint32
   return vmsge_vv_i32m1_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32.i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1184,7 +1066,6 @@ vbool32_t test_vmsge_vx_i32m1_b32_m (vbool32_t mask, vbool32_t maskedoff, vint32
   return vmsge_vx_i32m1_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1194,7 +1075,6 @@ vbool16_t test_vmsge_vv_i32m2_b16_m (vbool16_t mask, vbool16_t maskedoff, vint32
   return vmsge_vv_i32m2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32.i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1204,7 +1084,6 @@ vbool16_t test_vmsge_vx_i32m2_b16_m (vbool16_t mask, vbool16_t maskedoff, vint32
   return vmsge_vx_i32m2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1214,7 +1093,6 @@ vbool8_t test_vmsge_vv_i32m4_b8_m (vbool8_t mask, vbool8_t maskedoff, vint32m4_t
   return vmsge_vv_i32m4_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32.i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1224,7 +1102,6 @@ vbool8_t test_vmsge_vx_i32m4_b8_m (vbool8_t mask, vbool8_t maskedoff, vint32m4_t
   return vmsge_vx_i32m4_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1234,7 +1111,6 @@ vbool4_t test_vmsge_vv_i32m8_b4_m (vbool4_t mask, vbool4_t maskedoff, vint32m8_t
   return vmsge_vv_i32m8_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i32.i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1244,7 +1120,6 @@ vbool4_t test_vmsge_vx_i32m8_b4_m (vbool4_t mask, vbool4_t maskedoff, vint32m8_t
   return vmsge_vx_i32m8_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1254,7 +1129,6 @@ vbool64_t test_vmsge_vv_i64m1_b64_m (vbool64_t mask, vbool64_t maskedoff, vint64
   return vmsge_vv_i64m1_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64.i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1264,7 +1138,6 @@ vbool64_t test_vmsge_vx_i64m1_b64_m (vbool64_t mask, vbool64_t maskedoff, vint64
   return vmsge_vx_i64m1_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1274,7 +1147,6 @@ vbool32_t test_vmsge_vv_i64m2_b32_m (vbool32_t mask, vbool32_t maskedoff, vint64
   return vmsge_vv_i64m2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64.i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1284,7 +1156,6 @@ vbool32_t test_vmsge_vx_i64m2_b32_m (vbool32_t mask, vbool32_t maskedoff, vint64
   return vmsge_vx_i64m2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1294,7 +1165,6 @@ vbool16_t test_vmsge_vv_i64m4_b16_m (vbool16_t mask, vbool16_t maskedoff, vint64
   return vmsge_vv_i64m4_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64.i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1304,7 +1174,6 @@ vbool16_t test_vmsge_vx_i64m4_b16_m (vbool16_t mask, vbool16_t maskedoff, vint64
   return vmsge_vx_i64m4_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1314,7 +1183,6 @@ vbool8_t test_vmsge_vv_i64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vint64m8_t
   return vmsge_vv_i64m8_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i64.i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1324,7 +1192,6 @@ vbool8_t test_vmsge_vx_i64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vint64m8_t
   return vmsge_vx_i64m8_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8mf8_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1334,7 +1201,6 @@ vbool64_t test_vmsgeu_vv_u8mf8_b64_m (vbool64_t mask, vbool64_t maskedoff, vuint
   return vmsgeu_vv_u8mf8_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8mf8_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8.i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1344,7 +1210,6 @@ vbool64_t test_vmsgeu_vx_u8mf8_b64_m (vbool64_t mask, vbool64_t maskedoff, vuint
   return vmsgeu_vx_u8mf8_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8mf4_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1354,7 +1219,6 @@ vbool32_t test_vmsgeu_vv_u8mf4_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint
   return vmsgeu_vv_u8mf4_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8mf4_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8.i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1364,7 +1228,6 @@ vbool32_t test_vmsgeu_vx_u8mf4_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint
   return vmsgeu_vx_u8mf4_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8mf2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1374,7 +1237,6 @@ vbool16_t test_vmsgeu_vv_u8mf2_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint
   return vmsgeu_vv_u8mf2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8mf2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8.i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1384,7 +1246,6 @@ vbool16_t test_vmsgeu_vx_u8mf2_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint
   return vmsgeu_vx_u8mf2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8m1_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1394,7 +1255,6 @@ vbool8_t test_vmsgeu_vv_u8m1_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint8m1_t
   return vmsgeu_vv_u8m1_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8m1_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8.i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1404,7 +1264,6 @@ vbool8_t test_vmsgeu_vx_u8m1_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint8m1_t
   return vmsgeu_vx_u8m1_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8m2_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1414,7 +1273,6 @@ vbool4_t test_vmsgeu_vv_u8m2_b4_m (vbool4_t mask, vbool4_t maskedoff, vuint8m2_t
   return vmsgeu_vv_u8m2_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8m2_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8.i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1424,7 +1282,6 @@ vbool4_t test_vmsgeu_vx_u8m2_b4_m (vbool4_t mask, vbool4_t maskedoff, vuint8m2_t
   return vmsgeu_vx_u8m2_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8m4_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1434,7 +1291,6 @@ vbool2_t test_vmsgeu_vv_u8m4_b2_m (vbool2_t mask, vbool2_t maskedoff, vuint8m4_t
   return vmsgeu_vv_u8m4_b2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8m4_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8.i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1444,7 +1300,6 @@ vbool2_t test_vmsgeu_vx_u8m4_b2_m (vbool2_t mask, vbool2_t maskedoff, vuint8m4_t
   return vmsgeu_vx_u8m4_b2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8m8_b1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsgeu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1454,7 +1309,6 @@ vbool1_t test_vmsgeu_vv_u8m8_b1_m (vbool1_t mask, vbool1_t maskedoff, vuint8m8_t
   return vmsgeu_vv_u8m8_b1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8m8_b1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsgeu.mask.nxv64i8.i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1464,7 +1318,6 @@ vbool1_t test_vmsgeu_vx_u8m8_b1_m (vbool1_t mask, vbool1_t maskedoff, vuint8m8_t
   return vmsgeu_vx_u8m8_b1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u16mf4_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1474,7 +1327,6 @@ vbool64_t test_vmsgeu_vv_u16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vuin
   return vmsgeu_vv_u16mf4_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u16mf4_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16.i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1484,7 +1336,6 @@ vbool64_t test_vmsgeu_vx_u16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vuin
   return vmsgeu_vx_u16mf4_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u16mf2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1494,7 +1345,6 @@ vbool32_t test_vmsgeu_vv_u16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vuin
   return vmsgeu_vv_u16mf2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u16mf2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16.i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1504,7 +1354,6 @@ vbool32_t test_vmsgeu_vx_u16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vuin
   return vmsgeu_vx_u16mf2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u16m1_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1514,7 +1363,6 @@ vbool16_t test_vmsgeu_vv_u16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint
   return vmsgeu_vv_u16m1_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u16m1_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16.i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1524,7 +1372,6 @@ vbool16_t test_vmsgeu_vx_u16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint
   return vmsgeu_vx_u16m1_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u16m2_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1534,7 +1381,6 @@ vbool8_t test_vmsgeu_vv_u16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint16m2
   return vmsgeu_vv_u16m2_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u16m2_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16.i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1544,7 +1390,6 @@ vbool8_t test_vmsgeu_vx_u16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint16m2
   return vmsgeu_vx_u16m2_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u16m4_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1554,7 +1399,6 @@ vbool4_t test_vmsgeu_vv_u16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vuint16m4
   return vmsgeu_vv_u16m4_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u16m4_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16.i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1564,7 +1408,6 @@ vbool4_t test_vmsgeu_vx_u16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vuint16m4
   return vmsgeu_vx_u16m4_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u16m8_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1574,7 +1417,6 @@ vbool2_t test_vmsgeu_vv_u16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vuint16m8
   return vmsgeu_vv_u16m8_b2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u16m8_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i16.i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1584,7 +1426,6 @@ vbool2_t test_vmsgeu_vx_u16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vuint16m8
   return vmsgeu_vx_u16m8_b2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1594,7 +1435,6 @@ vbool64_t test_vmsgeu_vv_u32mf2_b64_m (vbool64_t mask, vbool64_t maskedoff, vuin
   return vmsgeu_vv_u32mf2_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1604,7 +1444,6 @@ vbool64_t test_vmsgeu_vx_u32mf2_b64_m (vbool64_t mask, vbool64_t maskedoff, vuin
   return vmsgeu_vx_u32mf2_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1614,7 +1453,6 @@ vbool32_t test_vmsgeu_vv_u32m1_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint
   return vmsgeu_vv_u32m1_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32.i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1624,7 +1462,6 @@ vbool32_t test_vmsgeu_vx_u32m1_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint
   return vmsgeu_vx_u32m1_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1634,7 +1471,6 @@ vbool16_t test_vmsgeu_vv_u32m2_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint
   return vmsgeu_vv_u32m2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32.i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1644,7 +1480,6 @@ vbool16_t test_vmsgeu_vx_u32m2_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint
   return vmsgeu_vx_u32m2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1654,7 +1489,6 @@ vbool8_t test_vmsgeu_vv_u32m4_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint32m4
   return vmsgeu_vv_u32m4_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32.i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1664,7 +1498,6 @@ vbool8_t test_vmsgeu_vx_u32m4_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint32m4
   return vmsgeu_vx_u32m4_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1674,7 +1507,6 @@ vbool4_t test_vmsgeu_vv_u32m8_b4_m (vbool4_t mask, vbool4_t maskedoff, vuint32m8
   return vmsgeu_vv_u32m8_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i32.i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1684,7 +1516,6 @@ vbool4_t test_vmsgeu_vx_u32m8_b4_m (vbool4_t mask, vbool4_t maskedoff, vuint32m8
   return vmsgeu_vx_u32m8_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1694,7 +1525,6 @@ vbool64_t test_vmsgeu_vv_u64m1_b64_m (vbool64_t mask, vbool64_t maskedoff, vuint
   return vmsgeu_vv_u64m1_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64.i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1704,7 +1534,6 @@ vbool64_t test_vmsgeu_vx_u64m1_b64_m (vbool64_t mask, vbool64_t maskedoff, vuint
   return vmsgeu_vx_u64m1_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1714,7 +1543,6 @@ vbool32_t test_vmsgeu_vv_u64m2_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint
   return vmsgeu_vv_u64m2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64.i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1724,7 +1552,6 @@ vbool32_t test_vmsgeu_vx_u64m2_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint
   return vmsgeu_vx_u64m2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1734,7 +1561,6 @@ vbool16_t test_vmsgeu_vv_u64m4_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint
   return vmsgeu_vv_u64m4_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64.i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1744,7 +1570,6 @@ vbool16_t test_vmsgeu_vx_u64m4_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint
   return vmsgeu_vx_u64m4_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1754,7 +1579,6 @@ vbool8_t test_vmsgeu_vv_u64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint64m8
   return vmsgeu_vv_u64m8_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i64.i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsgt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsgt.c
index 925cdc6e6289b..8eeacfa1572c2 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsgt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsgt.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vbool64_t test_vmsgt_vv_i8mf8_b64 (vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
   return vmsgt_vv_i8mf8_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vbool64_t test_vmsgt_vx_i8mf8_b64 (vint8mf8_t op1, int8_t op2, size_t vl) {
   return vmsgt_vx_i8mf8_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vbool32_t test_vmsgt_vv_i8mf4_b32 (vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
   return vmsgt_vv_i8mf4_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vbool32_t test_vmsgt_vx_i8mf4_b32 (vint8mf4_t op1, int8_t op2, size_t vl) {
   return vmsgt_vx_i8mf4_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vbool16_t test_vmsgt_vv_i8mf2_b16 (vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
   return vmsgt_vv_i8mf2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vbool16_t test_vmsgt_vx_i8mf2_b16 (vint8mf2_t op1, int8_t op2, size_t vl) {
   return vmsgt_vx_i8mf2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vbool8_t test_vmsgt_vv_i8m1_b8 (vint8m1_t op1, vint8m1_t op2, size_t vl) {
   return vmsgt_vv_i8m1_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vbool8_t test_vmsgt_vx_i8m1_b8 (vint8m1_t op1, int8_t op2, size_t vl) {
   return vmsgt_vx_i8m1_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vbool4_t test_vmsgt_vv_i8m2_b4 (vint8m2_t op1, vint8m2_t op2, size_t vl) {
   return vmsgt_vv_i8m2_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vbool4_t test_vmsgt_vx_i8m2_b4 (vint8m2_t op1, int8_t op2, size_t vl) {
   return vmsgt_vx_i8m2_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vbool2_t test_vmsgt_vv_i8m4_b2 (vint8m4_t op1, vint8m4_t op2, size_t vl) {
   return vmsgt_vv_i8m4_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vbool2_t test_vmsgt_vx_i8m4_b2 (vint8m4_t op1, int8_t op2, size_t vl) {
   return vmsgt_vx_i8m4_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsgt.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vbool1_t test_vmsgt_vv_i8m8_b1 (vint8m8_t op1, vint8m8_t op2, size_t vl) {
   return vmsgt_vv_i8m8_b1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsgt.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vbool1_t test_vmsgt_vx_i8m8_b1 (vint8m8_t op1, int8_t op2, size_t vl) {
   return vmsgt_vx_i8m8_b1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vbool64_t test_vmsgt_vv_i16mf4_b64 (vint16mf4_t op1, vint16mf4_t op2, size_t vl)
   return vmsgt_vv_i16mf4_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vbool64_t test_vmsgt_vx_i16mf4_b64 (vint16mf4_t op1, int16_t op2, size_t vl) {
   return vmsgt_vx_i16mf4_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vbool32_t test_vmsgt_vv_i16mf2_b32 (vint16mf2_t op1, vint16mf2_t op2, size_t vl)
   return vmsgt_vv_i16mf2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vbool32_t test_vmsgt_vx_i16mf2_b32 (vint16mf2_t op1, int16_t op2, size_t vl) {
   return vmsgt_vx_i16mf2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vbool16_t test_vmsgt_vv_i16m1_b16 (vint16m1_t op1, vint16m1_t op2, size_t vl) {
   return vmsgt_vv_i16m1_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vbool16_t test_vmsgt_vx_i16m1_b16 (vint16m1_t op1, int16_t op2, size_t vl) {
   return vmsgt_vx_i16m1_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vbool8_t test_vmsgt_vv_i16m2_b8 (vint16m2_t op1, vint16m2_t op2, size_t vl) {
   return vmsgt_vv_i16m2_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vbool8_t test_vmsgt_vx_i16m2_b8 (vint16m2_t op1, int16_t op2, size_t vl) {
   return vmsgt_vx_i16m2_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vbool4_t test_vmsgt_vv_i16m4_b4 (vint16m4_t op1, vint16m4_t op2, size_t vl) {
   return vmsgt_vv_i16m4_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vbool4_t test_vmsgt_vx_i16m4_b4 (vint16m4_t op1, int16_t op2, size_t vl) {
   return vmsgt_vx_i16m4_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vbool2_t test_vmsgt_vv_i16m8_b2 (vint16m8_t op1, vint16m8_t op2, size_t vl) {
   return vmsgt_vv_i16m8_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vbool2_t test_vmsgt_vx_i16m8_b2 (vint16m8_t op1, int16_t op2, size_t vl) {
   return vmsgt_vx_i16m8_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vbool64_t test_vmsgt_vv_i32mf2_b64 (vint32mf2_t op1, vint32mf2_t op2, size_t vl)
   return vmsgt_vv_i32mf2_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vbool64_t test_vmsgt_vx_i32mf2_b64 (vint32mf2_t op1, int32_t op2, size_t vl) {
   return vmsgt_vx_i32mf2_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vbool32_t test_vmsgt_vv_i32m1_b32 (vint32m1_t op1, vint32m1_t op2, size_t vl) {
   return vmsgt_vv_i32m1_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -304,7 +274,6 @@ vbool32_t test_vmsgt_vx_i32m1_b32 (vint32m1_t op1, int32_t op2, size_t vl) {
   return vmsgt_vx_i32m1_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -314,7 +283,6 @@ vbool16_t test_vmsgt_vv_i32m2_b16 (vint32m2_t op1, vint32m2_t op2, size_t vl) {
   return vmsgt_vv_i32m2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -324,7 +292,6 @@ vbool16_t test_vmsgt_vx_i32m2_b16 (vint32m2_t op1, int32_t op2, size_t vl) {
   return vmsgt_vx_i32m2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -334,7 +301,6 @@ vbool8_t test_vmsgt_vv_i32m4_b8 (vint32m4_t op1, vint32m4_t op2, size_t vl) {
   return vmsgt_vv_i32m4_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -344,7 +310,6 @@ vbool8_t test_vmsgt_vx_i32m4_b8 (vint32m4_t op1, int32_t op2, size_t vl) {
   return vmsgt_vx_i32m4_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -354,7 +319,6 @@ vbool4_t test_vmsgt_vv_i32m8_b4 (vint32m8_t op1, vint32m8_t op2, size_t vl) {
   return vmsgt_vv_i32m8_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -364,7 +328,6 @@ vbool4_t test_vmsgt_vx_i32m8_b4 (vint32m8_t op1, int32_t op2, size_t vl) {
   return vmsgt_vx_i32m8_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -374,7 +337,6 @@ vbool64_t test_vmsgt_vv_i64m1_b64 (vint64m1_t op1, vint64m1_t op2, size_t vl) {
   return vmsgt_vv_i64m1_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -384,7 +346,6 @@ vbool64_t test_vmsgt_vx_i64m1_b64 (vint64m1_t op1, int64_t op2, size_t vl) {
   return vmsgt_vx_i64m1_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -394,7 +355,6 @@ vbool32_t test_vmsgt_vv_i64m2_b32 (vint64m2_t op1, vint64m2_t op2, size_t vl) {
   return vmsgt_vv_i64m2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -404,7 +364,6 @@ vbool32_t test_vmsgt_vx_i64m2_b32 (vint64m2_t op1, int64_t op2, size_t vl) {
   return vmsgt_vx_i64m2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -414,7 +373,6 @@ vbool16_t test_vmsgt_vv_i64m4_b16 (vint64m4_t op1, vint64m4_t op2, size_t vl) {
   return vmsgt_vv_i64m4_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -424,7 +382,6 @@ vbool16_t test_vmsgt_vx_i64m4_b16 (vint64m4_t op1, int64_t op2, size_t vl) {
   return vmsgt_vx_i64m4_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -434,7 +391,6 @@ vbool8_t test_vmsgt_vv_i64m8_b8 (vint64m8_t op1, vint64m8_t op2, size_t vl) {
   return vmsgt_vv_i64m8_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -444,7 +400,6 @@ vbool8_t test_vmsgt_vx_i64m8_b8 (vint64m8_t op1, int64_t op2, size_t vl) {
   return vmsgt_vx_i64m8_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -454,7 +409,6 @@ vbool64_t test_vmsgtu_vv_u8mf8_b64 (vuint8mf8_t op1, vuint8mf8_t op2, size_t vl)
   return vmsgtu_vv_u8mf8_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -464,7 +418,6 @@ vbool64_t test_vmsgtu_vx_u8mf8_b64 (vuint8mf8_t op1, uint8_t op2, size_t vl) {
   return vmsgtu_vx_u8mf8_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -474,7 +427,6 @@ vbool32_t test_vmsgtu_vv_u8mf4_b32 (vuint8mf4_t op1, vuint8mf4_t op2, size_t vl)
   return vmsgtu_vv_u8mf4_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -484,7 +436,6 @@ vbool32_t test_vmsgtu_vx_u8mf4_b32 (vuint8mf4_t op1, uint8_t op2, size_t vl) {
   return vmsgtu_vx_u8mf4_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -494,7 +445,6 @@ vbool16_t test_vmsgtu_vv_u8mf2_b16 (vuint8mf2_t op1, vuint8mf2_t op2, size_t vl)
   return vmsgtu_vv_u8mf2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -504,7 +454,6 @@ vbool16_t test_vmsgtu_vx_u8mf2_b16 (vuint8mf2_t op1, uint8_t op2, size_t vl) {
   return vmsgtu_vx_u8mf2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -514,7 +463,6 @@ vbool8_t test_vmsgtu_vv_u8m1_b8 (vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vmsgtu_vv_u8m1_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -524,7 +472,6 @@ vbool8_t test_vmsgtu_vx_u8m1_b8 (vuint8m1_t op1, uint8_t op2, size_t vl) {
   return vmsgtu_vx_u8m1_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -534,7 +481,6 @@ vbool4_t test_vmsgtu_vv_u8m2_b4 (vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vmsgtu_vv_u8m2_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -544,7 +490,6 @@ vbool4_t test_vmsgtu_vx_u8m2_b4 (vuint8m2_t op1, uint8_t op2, size_t vl) {
   return vmsgtu_vx_u8m2_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -554,7 +499,6 @@ vbool2_t test_vmsgtu_vv_u8m4_b2 (vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vmsgtu_vv_u8m4_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -564,7 +508,6 @@ vbool2_t test_vmsgtu_vx_u8m4_b2 (vuint8m4_t op1, uint8_t op2, size_t vl) {
   return vmsgtu_vx_u8m4_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsgtu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -574,7 +517,6 @@ vbool1_t test_vmsgtu_vv_u8m8_b1 (vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
   return vmsgtu_vv_u8m8_b1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsgtu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -584,7 +526,6 @@ vbool1_t test_vmsgtu_vx_u8m8_b1 (vuint8m8_t op1, uint8_t op2, size_t vl) {
   return vmsgtu_vx_u8m8_b1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -594,7 +535,6 @@ vbool64_t test_vmsgtu_vv_u16mf4_b64 (vuint16mf4_t op1, vuint16mf4_t op2, size_t
   return vmsgtu_vv_u16mf4_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -604,7 +544,6 @@ vbool64_t test_vmsgtu_vx_u16mf4_b64 (vuint16mf4_t op1, uint16_t op2, size_t vl)
   return vmsgtu_vx_u16mf4_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -614,7 +553,6 @@ vbool32_t test_vmsgtu_vv_u16mf2_b32 (vuint16mf2_t op1, vuint16mf2_t op2, size_t
   return vmsgtu_vv_u16mf2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -624,7 +562,6 @@ vbool32_t test_vmsgtu_vx_u16mf2_b32 (vuint16mf2_t op1, uint16_t op2, size_t vl)
   return vmsgtu_vx_u16mf2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -634,7 +571,6 @@ vbool16_t test_vmsgtu_vv_u16m1_b16 (vuint16m1_t op1, vuint16m1_t op2, size_t vl)
   return vmsgtu_vv_u16m1_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -644,7 +580,6 @@ vbool16_t test_vmsgtu_vx_u16m1_b16 (vuint16m1_t op1, uint16_t op2, size_t vl) {
   return vmsgtu_vx_u16m1_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -654,7 +589,6 @@ vbool8_t test_vmsgtu_vv_u16m2_b8 (vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vmsgtu_vv_u16m2_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -664,7 +598,6 @@ vbool8_t test_vmsgtu_vx_u16m2_b8 (vuint16m2_t op1, uint16_t op2, size_t vl) {
   return vmsgtu_vx_u16m2_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -674,7 +607,6 @@ vbool4_t test_vmsgtu_vv_u16m4_b4 (vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vmsgtu_vv_u16m4_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -684,7 +616,6 @@ vbool4_t test_vmsgtu_vx_u16m4_b4 (vuint16m4_t op1, uint16_t op2, size_t vl) {
   return vmsgtu_vx_u16m4_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -694,7 +625,6 @@ vbool2_t test_vmsgtu_vv_u16m8_b2 (vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return vmsgtu_vv_u16m8_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -704,7 +634,6 @@ vbool2_t test_vmsgtu_vx_u16m8_b2 (vuint16m8_t op1, uint16_t op2, size_t vl) {
   return vmsgtu_vx_u16m8_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -714,7 +643,6 @@ vbool64_t test_vmsgtu_vv_u32mf2_b64 (vuint32mf2_t op1, vuint32mf2_t op2, size_t
   return vmsgtu_vv_u32mf2_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -724,7 +652,6 @@ vbool64_t test_vmsgtu_vx_u32mf2_b64 (vuint32mf2_t op1, uint32_t op2, size_t vl)
   return vmsgtu_vx_u32mf2_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -734,7 +661,6 @@ vbool32_t test_vmsgtu_vv_u32m1_b32 (vuint32m1_t op1, vuint32m1_t op2, size_t vl)
   return vmsgtu_vv_u32m1_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -744,7 +670,6 @@ vbool32_t test_vmsgtu_vx_u32m1_b32 (vuint32m1_t op1, uint32_t op2, size_t vl) {
   return vmsgtu_vx_u32m1_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -754,7 +679,6 @@ vbool16_t test_vmsgtu_vv_u32m2_b16 (vuint32m2_t op1, vuint32m2_t op2, size_t vl)
   return vmsgtu_vv_u32m2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -764,7 +688,6 @@ vbool16_t test_vmsgtu_vx_u32m2_b16 (vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vmsgtu_vx_u32m2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -774,7 +697,6 @@ vbool8_t test_vmsgtu_vv_u32m4_b8 (vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vmsgtu_vv_u32m4_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -784,7 +706,6 @@ vbool8_t test_vmsgtu_vx_u32m4_b8 (vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vmsgtu_vx_u32m4_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -794,7 +715,6 @@ vbool4_t test_vmsgtu_vv_u32m8_b4 (vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
   return vmsgtu_vv_u32m8_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -804,7 +724,6 @@ vbool4_t test_vmsgtu_vx_u32m8_b4 (vuint32m8_t op1, uint32_t op2, size_t vl) {
   return vmsgtu_vx_u32m8_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -814,7 +733,6 @@ vbool64_t test_vmsgtu_vv_u64m1_b64 (vuint64m1_t op1, vuint64m1_t op2, size_t vl)
   return vmsgtu_vv_u64m1_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -824,7 +742,6 @@ vbool64_t test_vmsgtu_vx_u64m1_b64 (vuint64m1_t op1, uint64_t op2, size_t vl) {
   return vmsgtu_vx_u64m1_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -834,7 +751,6 @@ vbool32_t test_vmsgtu_vv_u64m2_b32 (vuint64m2_t op1, vuint64m2_t op2, size_t vl)
   return vmsgtu_vv_u64m2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -844,7 +760,6 @@ vbool32_t test_vmsgtu_vx_u64m2_b32 (vuint64m2_t op1, uint64_t op2, size_t vl) {
   return vmsgtu_vx_u64m2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -854,7 +769,6 @@ vbool16_t test_vmsgtu_vv_u64m4_b16 (vuint64m4_t op1, vuint64m4_t op2, size_t vl)
   return vmsgtu_vv_u64m4_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -864,7 +778,6 @@ vbool16_t test_vmsgtu_vx_u64m4_b16 (vuint64m4_t op1, uint64_t op2, size_t vl) {
   return vmsgtu_vx_u64m4_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -874,7 +787,6 @@ vbool8_t test_vmsgtu_vv_u64m8_b8 (vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
   return vmsgtu_vv_u64m8_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -884,7 +796,6 @@ vbool8_t test_vmsgtu_vx_u64m8_b8 (vuint64m8_t op1, uint64_t op2, size_t vl) {
   return vmsgtu_vx_u64m8_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i8mf8_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -894,7 +805,6 @@ vbool64_t test_vmsgt_vv_i8mf8_b64_m (vbool64_t mask, vbool64_t maskedoff, vint8m
   return vmsgt_vv_i8mf8_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i8mf8_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i8.i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -904,7 +814,6 @@ vbool64_t test_vmsgt_vx_i8mf8_b64_m (vbool64_t mask, vbool64_t maskedoff, vint8m
   return vmsgt_vx_i8mf8_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i8mf4_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -914,7 +823,6 @@ vbool32_t test_vmsgt_vv_i8mf4_b32_m (vbool32_t mask, vbool32_t maskedoff, vint8m
   return vmsgt_vv_i8mf4_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i8mf4_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i8.i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -924,7 +832,6 @@ vbool32_t test_vmsgt_vx_i8mf4_b32_m (vbool32_t mask, vbool32_t maskedoff, vint8m
   return vmsgt_vx_i8mf4_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i8mf2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -934,7 +841,6 @@ vbool16_t test_vmsgt_vv_i8mf2_b16_m (vbool16_t mask, vbool16_t maskedoff, vint8m
   return vmsgt_vv_i8mf2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i8mf2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i8.i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -944,7 +850,6 @@ vbool16_t test_vmsgt_vx_i8mf2_b16_m (vbool16_t mask, vbool16_t maskedoff, vint8m
   return vmsgt_vx_i8mf2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i8m1_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -954,7 +859,6 @@ vbool8_t test_vmsgt_vv_i8m1_b8_m (vbool8_t mask, vbool8_t maskedoff, vint8m1_t o
   return vmsgt_vv_i8m1_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i8m1_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i8.i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -964,7 +868,6 @@ vbool8_t test_vmsgt_vx_i8m1_b8_m (vbool8_t mask, vbool8_t maskedoff, vint8m1_t o
   return vmsgt_vx_i8m1_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i8m2_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -974,7 +877,6 @@ vbool4_t test_vmsgt_vv_i8m2_b4_m (vbool4_t mask, vbool4_t maskedoff, vint8m2_t o
   return vmsgt_vv_i8m2_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i8m2_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i8.i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -984,7 +886,6 @@ vbool4_t test_vmsgt_vx_i8m2_b4_m (vbool4_t mask, vbool4_t maskedoff, vint8m2_t o
   return vmsgt_vx_i8m2_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i8m4_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -994,7 +895,6 @@ vbool2_t test_vmsgt_vv_i8m4_b2_m (vbool2_t mask, vbool2_t maskedoff, vint8m4_t o
   return vmsgt_vv_i8m4_b2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i8m4_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i8.i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1004,7 +904,6 @@ vbool2_t test_vmsgt_vx_i8m4_b2_m (vbool2_t mask, vbool2_t maskedoff, vint8m4_t o
   return vmsgt_vx_i8m4_b2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i8m8_b1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsgt.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1014,7 +913,6 @@ vbool1_t test_vmsgt_vv_i8m8_b1_m (vbool1_t mask, vbool1_t maskedoff, vint8m8_t o
   return vmsgt_vv_i8m8_b1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i8m8_b1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsgt.mask.nxv64i8.i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1024,7 +922,6 @@ vbool1_t test_vmsgt_vx_i8m8_b1_m (vbool1_t mask, vbool1_t maskedoff, vint8m8_t o
   return vmsgt_vx_i8m8_b1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i16mf4_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1034,7 +931,6 @@ vbool64_t test_vmsgt_vv_i16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vint1
   return vmsgt_vv_i16mf4_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i16mf4_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i16.i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1044,7 +940,6 @@ vbool64_t test_vmsgt_vx_i16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vint1
   return vmsgt_vx_i16mf4_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i16mf2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1054,7 +949,6 @@ vbool32_t test_vmsgt_vv_i16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vint1
   return vmsgt_vv_i16mf2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i16mf2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i16.i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1064,7 +958,6 @@ vbool32_t test_vmsgt_vx_i16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vint1
   return vmsgt_vx_i16mf2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i16m1_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1074,7 +967,6 @@ vbool16_t test_vmsgt_vv_i16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vint16
   return vmsgt_vv_i16m1_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i16m1_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i16.i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1084,7 +976,6 @@ vbool16_t test_vmsgt_vx_i16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vint16
   return vmsgt_vx_i16m1_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i16m2_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1094,7 +985,6 @@ vbool8_t test_vmsgt_vv_i16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vint16m2_t
   return vmsgt_vv_i16m2_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i16m2_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i16.i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1104,7 +994,6 @@ vbool8_t test_vmsgt_vx_i16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vint16m2_t
   return vmsgt_vx_i16m2_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i16m4_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1114,7 +1003,6 @@ vbool4_t test_vmsgt_vv_i16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vint16m4_t
   return vmsgt_vv_i16m4_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i16m4_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i16.i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1124,7 +1012,6 @@ vbool4_t test_vmsgt_vx_i16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vint16m4_t
   return vmsgt_vx_i16m4_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i16m8_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1134,7 +1021,6 @@ vbool2_t test_vmsgt_vv_i16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vint16m8_t
   return vmsgt_vv_i16m8_b2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i16m8_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i16.i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1144,7 +1030,6 @@ vbool2_t test_vmsgt_vx_i16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vint16m8_t
   return vmsgt_vx_i16m8_b2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1154,7 +1039,6 @@ vbool64_t test_vmsgt_vv_i32mf2_b64_m (vbool64_t mask, vbool64_t maskedoff, vint3
   return vmsgt_vv_i32mf2_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1164,7 +1048,6 @@ vbool64_t test_vmsgt_vx_i32mf2_b64_m (vbool64_t mask, vbool64_t maskedoff, vint3
   return vmsgt_vx_i32mf2_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1174,7 +1057,6 @@ vbool32_t test_vmsgt_vv_i32m1_b32_m (vbool32_t mask, vbool32_t maskedoff, vint32
   return vmsgt_vv_i32m1_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i32.i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1184,7 +1066,6 @@ vbool32_t test_vmsgt_vx_i32m1_b32_m (vbool32_t mask, vbool32_t maskedoff, vint32
   return vmsgt_vx_i32m1_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1194,7 +1075,6 @@ vbool16_t test_vmsgt_vv_i32m2_b16_m (vbool16_t mask, vbool16_t maskedoff, vint32
   return vmsgt_vv_i32m2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i32.i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1204,7 +1084,6 @@ vbool16_t test_vmsgt_vx_i32m2_b16_m (vbool16_t mask, vbool16_t maskedoff, vint32
   return vmsgt_vx_i32m2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1214,7 +1093,6 @@ vbool8_t test_vmsgt_vv_i32m4_b8_m (vbool8_t mask, vbool8_t maskedoff, vint32m4_t
   return vmsgt_vv_i32m4_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i32.i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1224,7 +1102,6 @@ vbool8_t test_vmsgt_vx_i32m4_b8_m (vbool8_t mask, vbool8_t maskedoff, vint32m4_t
   return vmsgt_vx_i32m4_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1234,7 +1111,6 @@ vbool4_t test_vmsgt_vv_i32m8_b4_m (vbool4_t mask, vbool4_t maskedoff, vint32m8_t
   return vmsgt_vv_i32m8_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i32.i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1244,7 +1120,6 @@ vbool4_t test_vmsgt_vx_i32m8_b4_m (vbool4_t mask, vbool4_t maskedoff, vint32m8_t
   return vmsgt_vx_i32m8_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1254,7 +1129,6 @@ vbool64_t test_vmsgt_vv_i64m1_b64_m (vbool64_t mask, vbool64_t maskedoff, vint64
   return vmsgt_vv_i64m1_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i64.i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1264,7 +1138,6 @@ vbool64_t test_vmsgt_vx_i64m1_b64_m (vbool64_t mask, vbool64_t maskedoff, vint64
   return vmsgt_vx_i64m1_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1274,7 +1147,6 @@ vbool32_t test_vmsgt_vv_i64m2_b32_m (vbool32_t mask, vbool32_t maskedoff, vint64
   return vmsgt_vv_i64m2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i64.i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1284,7 +1156,6 @@ vbool32_t test_vmsgt_vx_i64m2_b32_m (vbool32_t mask, vbool32_t maskedoff, vint64
   return vmsgt_vx_i64m2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1294,7 +1165,6 @@ vbool16_t test_vmsgt_vv_i64m4_b16_m (vbool16_t mask, vbool16_t maskedoff, vint64
   return vmsgt_vv_i64m4_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i64.i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1304,7 +1174,6 @@ vbool16_t test_vmsgt_vx_i64m4_b16_m (vbool16_t mask, vbool16_t maskedoff, vint64
   return vmsgt_vx_i64m4_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1314,7 +1183,6 @@ vbool8_t test_vmsgt_vv_i64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vint64m8_t
   return vmsgt_vv_i64m8_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i64.i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1324,7 +1192,6 @@ vbool8_t test_vmsgt_vx_i64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vint64m8_t
   return vmsgt_vx_i64m8_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8mf8_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1334,7 +1201,6 @@ vbool64_t test_vmsgtu_vv_u8mf8_b64_m (vbool64_t mask, vbool64_t maskedoff, vuint
   return vmsgtu_vv_u8mf8_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8mf8_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8.i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1344,7 +1210,6 @@ vbool64_t test_vmsgtu_vx_u8mf8_b64_m (vbool64_t mask, vbool64_t maskedoff, vuint
   return vmsgtu_vx_u8mf8_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8mf4_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1354,7 +1219,6 @@ vbool32_t test_vmsgtu_vv_u8mf4_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint
   return vmsgtu_vv_u8mf4_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8mf4_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8.i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1364,7 +1228,6 @@ vbool32_t test_vmsgtu_vx_u8mf4_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint
   return vmsgtu_vx_u8mf4_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8mf2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1374,7 +1237,6 @@ vbool16_t test_vmsgtu_vv_u8mf2_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint
   return vmsgtu_vv_u8mf2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8mf2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8.i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1384,7 +1246,6 @@ vbool16_t test_vmsgtu_vx_u8mf2_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint
   return vmsgtu_vx_u8mf2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8m1_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1394,7 +1255,6 @@ vbool8_t test_vmsgtu_vv_u8m1_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint8m1_t
   return vmsgtu_vv_u8m1_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m1_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8.i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1404,7 +1264,6 @@ vbool8_t test_vmsgtu_vx_u8m1_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint8m1_t
   return vmsgtu_vx_u8m1_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8m2_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1414,7 +1273,6 @@ vbool4_t test_vmsgtu_vv_u8m2_b4_m (vbool4_t mask, vbool4_t maskedoff, vuint8m2_t
   return vmsgtu_vv_u8m2_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m2_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8.i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1424,7 +1282,6 @@ vbool4_t test_vmsgtu_vx_u8m2_b4_m (vbool4_t mask, vbool4_t maskedoff, vuint8m2_t
   return vmsgtu_vx_u8m2_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8m4_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1434,7 +1291,6 @@ vbool2_t test_vmsgtu_vv_u8m4_b2_m (vbool2_t mask, vbool2_t maskedoff, vuint8m4_t
   return vmsgtu_vv_u8m4_b2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m4_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8.i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1444,7 +1300,6 @@ vbool2_t test_vmsgtu_vx_u8m4_b2_m (vbool2_t mask, vbool2_t maskedoff, vuint8m4_t
   return vmsgtu_vx_u8m4_b2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8m8_b1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsgtu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1454,7 +1309,6 @@ vbool1_t test_vmsgtu_vv_u8m8_b1_m (vbool1_t mask, vbool1_t maskedoff, vuint8m8_t
   return vmsgtu_vv_u8m8_b1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m8_b1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsgtu.mask.nxv64i8.i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1464,7 +1318,6 @@ vbool1_t test_vmsgtu_vx_u8m8_b1_m (vbool1_t mask, vbool1_t maskedoff, vuint8m8_t
   return vmsgtu_vx_u8m8_b1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u16mf4_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1474,7 +1327,6 @@ vbool64_t test_vmsgtu_vv_u16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vuin
   return vmsgtu_vv_u16mf4_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16mf4_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16.i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1484,7 +1336,6 @@ vbool64_t test_vmsgtu_vx_u16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vuin
   return vmsgtu_vx_u16mf4_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u16mf2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1494,7 +1345,6 @@ vbool32_t test_vmsgtu_vv_u16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vuin
   return vmsgtu_vv_u16mf2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16mf2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16.i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1504,7 +1354,6 @@ vbool32_t test_vmsgtu_vx_u16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vuin
   return vmsgtu_vx_u16mf2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u16m1_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1514,7 +1363,6 @@ vbool16_t test_vmsgtu_vv_u16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint
   return vmsgtu_vv_u16m1_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m1_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16.i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1524,7 +1372,6 @@ vbool16_t test_vmsgtu_vx_u16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint
   return vmsgtu_vx_u16m1_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u16m2_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1534,7 +1381,6 @@ vbool8_t test_vmsgtu_vv_u16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint16m2
   return vmsgtu_vv_u16m2_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m2_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16.i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1544,7 +1390,6 @@ vbool8_t test_vmsgtu_vx_u16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint16m2
   return vmsgtu_vx_u16m2_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u16m4_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1554,7 +1399,6 @@ vbool4_t test_vmsgtu_vv_u16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vuint16m4
   return vmsgtu_vv_u16m4_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m4_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16.i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1564,7 +1408,6 @@ vbool4_t test_vmsgtu_vx_u16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vuint16m4
   return vmsgtu_vx_u16m4_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u16m8_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1574,7 +1417,6 @@ vbool2_t test_vmsgtu_vv_u16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vuint16m8
   return vmsgtu_vv_u16m8_b2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m8_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i16.i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1584,7 +1426,6 @@ vbool2_t test_vmsgtu_vx_u16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vuint16m8
   return vmsgtu_vx_u16m8_b2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1594,7 +1435,6 @@ vbool64_t test_vmsgtu_vv_u32mf2_b64_m (vbool64_t mask, vbool64_t maskedoff, vuin
   return vmsgtu_vv_u32mf2_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1604,7 +1444,6 @@ vbool64_t test_vmsgtu_vx_u32mf2_b64_m (vbool64_t mask, vbool64_t maskedoff, vuin
   return vmsgtu_vx_u32mf2_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1614,7 +1453,6 @@ vbool32_t test_vmsgtu_vv_u32m1_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint
   return vmsgtu_vv_u32m1_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32.i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1624,7 +1462,6 @@ vbool32_t test_vmsgtu_vx_u32m1_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint
   return vmsgtu_vx_u32m1_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1634,7 +1471,6 @@ vbool16_t test_vmsgtu_vv_u32m2_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint
   return vmsgtu_vv_u32m2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32.i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1644,7 +1480,6 @@ vbool16_t test_vmsgtu_vx_u32m2_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint
   return vmsgtu_vx_u32m2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1654,7 +1489,6 @@ vbool8_t test_vmsgtu_vv_u32m4_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint32m4
   return vmsgtu_vv_u32m4_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32.i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1664,7 +1498,6 @@ vbool8_t test_vmsgtu_vx_u32m4_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint32m4
   return vmsgtu_vx_u32m4_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1674,7 +1507,6 @@ vbool4_t test_vmsgtu_vv_u32m8_b4_m (vbool4_t mask, vbool4_t maskedoff, vuint32m8
   return vmsgtu_vv_u32m8_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i32.i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1684,7 +1516,6 @@ vbool4_t test_vmsgtu_vx_u32m8_b4_m (vbool4_t mask, vbool4_t maskedoff, vuint32m8
   return vmsgtu_vx_u32m8_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1694,7 +1525,6 @@ vbool64_t test_vmsgtu_vv_u64m1_b64_m (vbool64_t mask, vbool64_t maskedoff, vuint
   return vmsgtu_vv_u64m1_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i64.i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1704,7 +1534,6 @@ vbool64_t test_vmsgtu_vx_u64m1_b64_m (vbool64_t mask, vbool64_t maskedoff, vuint
   return vmsgtu_vx_u64m1_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1714,7 +1543,6 @@ vbool32_t test_vmsgtu_vv_u64m2_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint
   return vmsgtu_vv_u64m2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i64.i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1724,7 +1552,6 @@ vbool32_t test_vmsgtu_vx_u64m2_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint
   return vmsgtu_vx_u64m2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1734,7 +1561,6 @@ vbool16_t test_vmsgtu_vv_u64m4_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint
   return vmsgtu_vv_u64m4_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i64.i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1744,7 +1570,6 @@ vbool16_t test_vmsgtu_vx_u64m4_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint
   return vmsgtu_vx_u64m4_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1754,7 +1579,6 @@ vbool8_t test_vmsgtu_vv_u64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint64m8
   return vmsgtu_vv_u64m8_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i64.i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsif.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsif.c
index 2072929797194..a0f3baefcd469 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsif.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsif.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmsif_m_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsif.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vbool1_t test_vmsif_m_b1(vbool1_t op1, size_t vl) {
   return vmsif_m_b1(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsif_m_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsif.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vbool2_t test_vmsif_m_b2(vbool2_t op1, size_t vl) {
   return vmsif_m_b2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsif_m_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsif.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vbool4_t test_vmsif_m_b4(vbool4_t op1, size_t vl) {
   return vmsif_m_b4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsif_m_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsif.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vbool8_t test_vmsif_m_b8(vbool8_t op1, size_t vl) {
   return vmsif_m_b8(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsif_m_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsif.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vbool16_t test_vmsif_m_b16(vbool16_t op1, size_t vl) {
   return vmsif_m_b16(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsif_m_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsif.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vbool32_t test_vmsif_m_b32(vbool32_t op1, size_t vl) {
   return vmsif_m_b32(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsif_m_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsif.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vbool64_t test_vmsif_m_b64(vbool64_t op1, size_t vl) {
   return vmsif_m_b64(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsif_m_b1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsif.mask.nxv64i1.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -85,7 +77,6 @@ vbool1_t test_vmsif_m_b1_m(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1,
   return vmsif_m_b1_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsif_m_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsif.mask.nxv32i1.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -96,7 +87,6 @@ vbool2_t test_vmsif_m_b2_m(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1,
   return vmsif_m_b2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsif_m_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsif.mask.nxv16i1.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -107,7 +97,6 @@ vbool4_t test_vmsif_m_b4_m(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1,
   return vmsif_m_b4_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsif_m_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsif.mask.nxv8i1.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -118,7 +107,6 @@ vbool8_t test_vmsif_m_b8_m(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1,
   return vmsif_m_b8_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsif_m_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsif.mask.nxv4i1.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -129,7 +117,6 @@ vbool16_t test_vmsif_m_b16_m(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1,
   return vmsif_m_b16_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsif_m_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsif.mask.nxv2i1.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -140,7 +127,6 @@ vbool32_t test_vmsif_m_b32_m(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1,
   return vmsif_m_b32_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsif_m_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsif.mask.nxv1i1.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsle.c
index d8460a5580b67..b481452032972 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsle.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsle.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vbool64_t test_vmsle_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
   return vmsle_vv_i8mf8_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vbool64_t test_vmsle_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) {
   return vmsle_vx_i8mf8_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vbool32_t test_vmsle_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
   return vmsle_vv_i8mf4_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vbool32_t test_vmsle_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) {
   return vmsle_vx_i8mf4_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vbool16_t test_vmsle_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
   return vmsle_vv_i8mf2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vbool16_t test_vmsle_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) {
   return vmsle_vx_i8mf2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vbool8_t test_vmsle_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) {
   return vmsle_vv_i8m1_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vbool8_t test_vmsle_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) {
   return vmsle_vx_i8m1_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vbool4_t test_vmsle_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
   return vmsle_vv_i8m2_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vbool4_t test_vmsle_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) {
   return vmsle_vx_i8m2_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vbool2_t test_vmsle_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) {
   return vmsle_vv_i8m4_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vbool2_t test_vmsle_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) {
   return vmsle_vx_i8m4_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsle.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vbool1_t test_vmsle_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) {
   return vmsle_vv_i8m8_b1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsle.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vbool1_t test_vmsle_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) {
   return vmsle_vx_i8m8_b1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -155,7 +140,6 @@ vbool64_t test_vmsle_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2,
   return vmsle_vv_i16mf4_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -165,7 +149,6 @@ vbool64_t test_vmsle_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) {
   return vmsle_vx_i16mf4_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -176,7 +159,6 @@ vbool32_t test_vmsle_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2,
   return vmsle_vv_i16mf2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -186,7 +168,6 @@ vbool32_t test_vmsle_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) {
   return vmsle_vx_i16mf2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -196,7 +177,6 @@ vbool16_t test_vmsle_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) {
   return vmsle_vv_i16m1_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -206,7 +186,6 @@ vbool16_t test_vmsle_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) {
   return vmsle_vx_i16m1_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -216,7 +195,6 @@ vbool8_t test_vmsle_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) {
   return vmsle_vv_i16m2_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -226,7 +204,6 @@ vbool8_t test_vmsle_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) {
   return vmsle_vx_i16m2_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -236,7 +213,6 @@ vbool4_t test_vmsle_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
   return vmsle_vv_i16m4_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -246,7 +222,6 @@ vbool4_t test_vmsle_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) {
   return vmsle_vx_i16m4_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -256,7 +231,6 @@ vbool2_t test_vmsle_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) {
   return vmsle_vv_i16m8_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -266,7 +240,6 @@ vbool2_t test_vmsle_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) {
   return vmsle_vx_i16m8_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -277,7 +250,6 @@ vbool64_t test_vmsle_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2,
   return vmsle_vv_i32mf2_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -287,7 +259,6 @@ vbool64_t test_vmsle_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) {
   return vmsle_vx_i32mf2_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -297,7 +268,6 @@ vbool32_t test_vmsle_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) {
   return vmsle_vv_i32m1_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -307,7 +277,6 @@ vbool32_t test_vmsle_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) {
   return vmsle_vx_i32m1_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -317,7 +286,6 @@ vbool16_t test_vmsle_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) {
   return vmsle_vv_i32m2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -327,7 +295,6 @@ vbool16_t test_vmsle_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) {
   return vmsle_vx_i32m2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -337,7 +304,6 @@ vbool8_t test_vmsle_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
   return vmsle_vv_i32m4_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -347,7 +313,6 @@ vbool8_t test_vmsle_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) {
   return vmsle_vx_i32m4_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -357,7 +322,6 @@ vbool4_t test_vmsle_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) {
   return vmsle_vv_i32m8_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -367,7 +331,6 @@ vbool4_t test_vmsle_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) {
   return vmsle_vx_i32m8_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -377,7 +340,6 @@ vbool64_t test_vmsle_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) {
   return vmsle_vv_i64m1_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -387,7 +349,6 @@ vbool64_t test_vmsle_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) {
   return vmsle_vx_i64m1_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -397,7 +358,6 @@ vbool32_t test_vmsle_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) {
   return vmsle_vv_i64m2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -407,7 +367,6 @@ vbool32_t test_vmsle_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) {
   return vmsle_vx_i64m2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -417,7 +376,6 @@ vbool16_t test_vmsle_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) {
   return vmsle_vv_i64m4_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -427,7 +385,6 @@ vbool16_t test_vmsle_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) {
   return vmsle_vx_i64m4_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -437,7 +394,6 @@ vbool8_t test_vmsle_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
   return vmsle_vv_i64m8_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -447,7 +403,6 @@ vbool8_t test_vmsle_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) {
   return vmsle_vx_i64m8_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -458,7 +413,6 @@ vbool64_t test_vmsleu_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2,
   return vmsleu_vv_u8mf8_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -468,7 +422,6 @@ vbool64_t test_vmsleu_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) {
   return vmsleu_vx_u8mf8_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -479,7 +432,6 @@ vbool32_t test_vmsleu_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2,
   return vmsleu_vv_u8mf4_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -489,7 +441,6 @@ vbool32_t test_vmsleu_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) {
   return vmsleu_vx_u8mf4_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -500,7 +451,6 @@ vbool16_t test_vmsleu_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2,
   return vmsleu_vv_u8mf2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -510,7 +460,6 @@ vbool16_t test_vmsleu_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) {
   return vmsleu_vx_u8mf2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -520,7 +469,6 @@ vbool8_t test_vmsleu_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vmsleu_vv_u8m1_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -530,7 +478,6 @@ vbool8_t test_vmsleu_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) {
   return vmsleu_vx_u8m1_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -540,7 +487,6 @@ vbool4_t test_vmsleu_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vmsleu_vv_u8m2_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -550,7 +496,6 @@ vbool4_t test_vmsleu_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) {
   return vmsleu_vx_u8m2_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -560,7 +505,6 @@ vbool2_t test_vmsleu_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vmsleu_vv_u8m4_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -570,7 +514,6 @@ vbool2_t test_vmsleu_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) {
   return vmsleu_vx_u8m4_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsleu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -580,7 +523,6 @@ vbool1_t test_vmsleu_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
   return vmsleu_vv_u8m8_b1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsleu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -590,7 +532,6 @@ vbool1_t test_vmsleu_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) {
   return vmsleu_vx_u8m8_b1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -601,7 +542,6 @@ vbool64_t test_vmsleu_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2,
   return vmsleu_vv_u16mf4_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -611,7 +551,6 @@ vbool64_t test_vmsleu_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) {
   return vmsleu_vx_u16mf4_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -622,7 +561,6 @@ vbool32_t test_vmsleu_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2,
   return vmsleu_vv_u16mf2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -632,7 +570,6 @@ vbool32_t test_vmsleu_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) {
   return vmsleu_vx_u16mf2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -643,7 +580,6 @@ vbool16_t test_vmsleu_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2,
   return vmsleu_vv_u16m1_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -653,7 +589,6 @@ vbool16_t test_vmsleu_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) {
   return vmsleu_vx_u16m1_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -663,7 +598,6 @@ vbool8_t test_vmsleu_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vmsleu_vv_u16m2_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -673,7 +607,6 @@ vbool8_t test_vmsleu_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) {
   return vmsleu_vx_u16m2_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -683,7 +616,6 @@ vbool4_t test_vmsleu_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vmsleu_vv_u16m4_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -693,7 +625,6 @@ vbool4_t test_vmsleu_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) {
   return vmsleu_vx_u16m4_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -703,7 +634,6 @@ vbool2_t test_vmsleu_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return vmsleu_vv_u16m8_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -713,7 +643,6 @@ vbool2_t test_vmsleu_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) {
   return vmsleu_vx_u16m8_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -724,7 +653,6 @@ vbool64_t test_vmsleu_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2,
   return vmsleu_vv_u32mf2_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -734,7 +662,6 @@ vbool64_t test_vmsleu_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) {
   return vmsleu_vx_u32mf2_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -745,7 +672,6 @@ vbool32_t test_vmsleu_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2,
   return vmsleu_vv_u32m1_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -755,7 +681,6 @@ vbool32_t test_vmsleu_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) {
   return vmsleu_vx_u32m1_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -766,7 +691,6 @@ vbool16_t test_vmsleu_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2,
   return vmsleu_vv_u32m2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -776,7 +700,6 @@ vbool16_t test_vmsleu_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vmsleu_vx_u32m2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -786,7 +709,6 @@ vbool8_t test_vmsleu_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vmsleu_vv_u32m4_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -796,7 +718,6 @@ vbool8_t test_vmsleu_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vmsleu_vx_u32m4_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -806,7 +727,6 @@ vbool4_t test_vmsleu_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
   return vmsleu_vv_u32m8_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -816,7 +736,6 @@ vbool4_t test_vmsleu_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) {
   return vmsleu_vx_u32m8_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -827,7 +746,6 @@ vbool64_t test_vmsleu_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2,
   return vmsleu_vv_u64m1_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -837,7 +755,6 @@ vbool64_t test_vmsleu_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) {
   return vmsleu_vx_u64m1_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -848,7 +765,6 @@ vbool32_t test_vmsleu_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2,
   return vmsleu_vv_u64m2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -858,7 +774,6 @@ vbool32_t test_vmsleu_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) {
   return vmsleu_vx_u64m2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -869,7 +784,6 @@ vbool16_t test_vmsleu_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2,
   return vmsleu_vv_u64m4_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -879,7 +793,6 @@ vbool16_t test_vmsleu_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) {
   return vmsleu_vx_u64m4_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -889,7 +802,6 @@ vbool8_t test_vmsleu_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
   return vmsleu_vv_u64m8_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -899,7 +811,6 @@ vbool8_t test_vmsleu_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) {
   return vmsleu_vx_u64m8_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i8mf8_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -910,7 +821,6 @@ vbool64_t test_vmsle_vv_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsle_vv_i8mf8_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i8mf8_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8.i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -921,7 +831,6 @@ vbool64_t test_vmsle_vx_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsle_vx_i8mf8_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i8mf4_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -932,7 +841,6 @@ vbool32_t test_vmsle_vv_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsle_vv_i8mf4_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i8mf4_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8.i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -943,7 +851,6 @@ vbool32_t test_vmsle_vx_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsle_vx_i8mf4_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i8mf2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -954,7 +861,6 @@ vbool16_t test_vmsle_vv_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsle_vv_i8mf2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i8mf2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8.i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -965,7 +871,6 @@ vbool16_t test_vmsle_vx_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsle_vx_i8mf2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i8m1_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -976,7 +881,6 @@ vbool8_t test_vmsle_vv_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsle_vv_i8m1_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i8m1_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8.i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -987,7 +891,6 @@ vbool8_t test_vmsle_vx_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsle_vx_i8m1_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i8m2_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -998,7 +901,6 @@ vbool4_t test_vmsle_vv_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmsle_vv_i8m2_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i8m2_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8.i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1009,7 +911,6 @@ vbool4_t test_vmsle_vx_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmsle_vx_i8m2_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i8m4_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1020,7 +921,6 @@ vbool2_t test_vmsle_vv_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmsle_vv_i8m4_b2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i8m4_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8.i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1031,7 +931,6 @@ vbool2_t test_vmsle_vx_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmsle_vx_i8m4_b2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i8m8_b1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsle.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1042,7 +941,6 @@ vbool1_t test_vmsle_vv_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
   return vmsle_vv_i8m8_b1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i8m8_b1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsle.mask.nxv64i8.i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1053,7 +951,6 @@ vbool1_t test_vmsle_vx_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
   return vmsle_vx_i8m8_b1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i16mf4_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1065,7 +962,6 @@ vbool64_t test_vmsle_vv_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsle_vv_i16mf4_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i16mf4_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16.i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1076,7 +972,6 @@ vbool64_t test_vmsle_vx_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsle_vx_i16mf4_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i16mf2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1088,7 +983,6 @@ vbool32_t test_vmsle_vv_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsle_vv_i16mf2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i16mf2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16.i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1099,7 +993,6 @@ vbool32_t test_vmsle_vx_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsle_vx_i16mf2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i16m1_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1110,7 +1003,6 @@ vbool16_t test_vmsle_vv_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsle_vv_i16m1_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i16m1_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16.i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1121,7 +1013,6 @@ vbool16_t test_vmsle_vx_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsle_vx_i16m1_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i16m2_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1132,7 +1023,6 @@ vbool8_t test_vmsle_vv_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsle_vv_i16m2_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i16m2_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16.i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1143,7 +1033,6 @@ vbool8_t test_vmsle_vx_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsle_vx_i16m2_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i16m4_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1154,7 +1043,6 @@ vbool4_t test_vmsle_vv_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmsle_vv_i16m4_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i16m4_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16.i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1165,7 +1053,6 @@ vbool4_t test_vmsle_vx_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmsle_vx_i16m4_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i16m8_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1176,7 +1063,6 @@ vbool2_t test_vmsle_vv_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmsle_vv_i16m8_b2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i16m8_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i16.i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1187,7 +1073,6 @@ vbool2_t test_vmsle_vx_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmsle_vx_i16m8_b2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1199,7 +1084,6 @@ vbool64_t test_vmsle_vv_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsle_vv_i32mf2_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1210,7 +1094,6 @@ vbool64_t test_vmsle_vx_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsle_vx_i32mf2_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1221,7 +1104,6 @@ vbool32_t test_vmsle_vv_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsle_vv_i32m1_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32.i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1232,7 +1114,6 @@ vbool32_t test_vmsle_vx_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsle_vx_i32m1_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1243,7 +1124,6 @@ vbool16_t test_vmsle_vv_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsle_vv_i32m2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32.i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1254,7 +1134,6 @@ vbool16_t test_vmsle_vx_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsle_vx_i32m2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1265,7 +1144,6 @@ vbool8_t test_vmsle_vv_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsle_vv_i32m4_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32.i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1276,7 +1154,6 @@ vbool8_t test_vmsle_vx_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsle_vx_i32m4_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1287,7 +1164,6 @@ vbool4_t test_vmsle_vv_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmsle_vv_i32m8_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i32.i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1298,7 +1174,6 @@ vbool4_t test_vmsle_vx_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmsle_vx_i32m8_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1309,7 +1184,6 @@ vbool64_t test_vmsle_vv_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsle_vv_i64m1_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64.i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1320,7 +1194,6 @@ vbool64_t test_vmsle_vx_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsle_vx_i64m1_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1331,7 +1204,6 @@ vbool32_t test_vmsle_vv_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsle_vv_i64m2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64.i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1342,7 +1214,6 @@ vbool32_t test_vmsle_vx_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsle_vx_i64m2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1353,7 +1224,6 @@ vbool16_t test_vmsle_vv_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsle_vv_i64m4_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64.i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1364,7 +1234,6 @@ vbool16_t test_vmsle_vx_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsle_vx_i64m4_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1375,7 +1244,6 @@ vbool8_t test_vmsle_vv_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsle_vv_i64m8_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i64.i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1386,7 +1254,6 @@ vbool8_t test_vmsle_vx_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsle_vx_i64m8_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf8_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1398,7 +1265,6 @@ vbool64_t test_vmsleu_vv_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsleu_vv_u8mf8_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf8_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8.i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1409,7 +1275,6 @@ vbool64_t test_vmsleu_vx_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsleu_vx_u8mf8_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf4_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1421,7 +1286,6 @@ vbool32_t test_vmsleu_vv_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsleu_vv_u8mf4_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf4_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8.i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1432,7 +1296,6 @@ vbool32_t test_vmsleu_vx_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsleu_vx_u8mf4_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1444,7 +1307,6 @@ vbool16_t test_vmsleu_vv_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsleu_vv_u8mf2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8.i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1455,7 +1317,6 @@ vbool16_t test_vmsleu_vx_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsleu_vx_u8mf2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u8m1_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1466,7 +1327,6 @@ vbool8_t test_vmsleu_vv_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsleu_vv_u8m1_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u8m1_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8.i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1477,7 +1337,6 @@ vbool8_t test_vmsleu_vx_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsleu_vx_u8m1_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u8m2_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1488,7 +1347,6 @@ vbool4_t test_vmsleu_vv_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmsleu_vv_u8m2_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u8m2_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8.i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1499,7 +1357,6 @@ vbool4_t test_vmsleu_vx_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmsleu_vx_u8m2_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u8m4_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1510,7 +1367,6 @@ vbool2_t test_vmsleu_vv_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmsleu_vv_u8m4_b2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u8m4_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8.i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1521,7 +1377,6 @@ vbool2_t test_vmsleu_vx_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmsleu_vx_u8m4_b2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u8m8_b1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsleu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1532,7 +1387,6 @@ vbool1_t test_vmsleu_vv_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
   return vmsleu_vv_u8m8_b1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u8m8_b1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsleu.mask.nxv64i8.i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1543,7 +1397,6 @@ vbool1_t test_vmsleu_vx_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
   return vmsleu_vx_u8m8_b1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u16mf4_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1555,7 +1408,6 @@ vbool64_t test_vmsleu_vv_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsleu_vv_u16mf4_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u16mf4_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16.i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1567,7 +1419,6 @@ vbool64_t test_vmsleu_vx_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsleu_vx_u16mf4_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u16mf2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1579,7 +1430,6 @@ vbool32_t test_vmsleu_vv_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsleu_vv_u16mf2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u16mf2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16.i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1591,7 +1441,6 @@ vbool32_t test_vmsleu_vx_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsleu_vx_u16mf2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u16m1_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1603,7 +1452,6 @@ vbool16_t test_vmsleu_vv_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsleu_vv_u16m1_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u16m1_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16.i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1614,7 +1462,6 @@ vbool16_t test_vmsleu_vx_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsleu_vx_u16m1_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u16m2_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1626,7 +1473,6 @@ vbool8_t test_vmsleu_vv_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsleu_vv_u16m2_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u16m2_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16.i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1637,7 +1483,6 @@ vbool8_t test_vmsleu_vx_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsleu_vx_u16m2_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u16m4_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1649,7 +1494,6 @@ vbool4_t test_vmsleu_vv_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmsleu_vv_u16m4_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u16m4_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16.i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1660,7 +1504,6 @@ vbool4_t test_vmsleu_vx_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmsleu_vx_u16m4_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u16m8_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1672,7 +1515,6 @@ vbool2_t test_vmsleu_vv_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmsleu_vv_u16m8_b2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u16m8_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i16.i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1683,7 +1525,6 @@ vbool2_t test_vmsleu_vx_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmsleu_vx_u16m8_b2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1695,7 +1536,6 @@ vbool64_t test_vmsleu_vv_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsleu_vv_u32mf2_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1707,7 +1547,6 @@ vbool64_t test_vmsleu_vx_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsleu_vx_u32mf2_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1719,7 +1558,6 @@ vbool32_t test_vmsleu_vv_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsleu_vv_u32m1_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32.i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1730,7 +1568,6 @@ vbool32_t test_vmsleu_vx_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsleu_vx_u32m1_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1742,7 +1579,6 @@ vbool16_t test_vmsleu_vv_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsleu_vv_u32m2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32.i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1753,7 +1589,6 @@ vbool16_t test_vmsleu_vx_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsleu_vx_u32m2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1765,7 +1600,6 @@ vbool8_t test_vmsleu_vv_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsleu_vv_u32m4_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32.i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1776,7 +1610,6 @@ vbool8_t test_vmsleu_vx_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsleu_vx_u32m4_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1788,7 +1621,6 @@ vbool4_t test_vmsleu_vv_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmsleu_vv_u32m8_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i32.i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1799,7 +1631,6 @@ vbool4_t test_vmsleu_vx_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmsleu_vx_u32m8_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1811,7 +1642,6 @@ vbool64_t test_vmsleu_vv_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsleu_vv_u64m1_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64.i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1822,7 +1652,6 @@ vbool64_t test_vmsleu_vx_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsleu_vx_u64m1_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1834,7 +1663,6 @@ vbool32_t test_vmsleu_vv_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsleu_vv_u64m2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64.i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1845,7 +1673,6 @@ vbool32_t test_vmsleu_vx_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsleu_vx_u64m2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1857,7 +1684,6 @@ vbool16_t test_vmsleu_vv_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsleu_vv_u64m4_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64.i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1868,7 +1694,6 @@ vbool16_t test_vmsleu_vx_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsleu_vx_u64m4_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1880,7 +1705,6 @@ vbool8_t test_vmsleu_vv_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsleu_vv_u64m8_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i64.i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmslt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmslt.c
index e95d15af08e77..267e3671bd60b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmslt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmslt.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vbool64_t test_vmslt_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
   return vmslt_vv_i8mf8_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vbool64_t test_vmslt_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) {
   return vmslt_vx_i8mf8_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vbool32_t test_vmslt_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
   return vmslt_vv_i8mf4_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vbool32_t test_vmslt_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) {
   return vmslt_vx_i8mf4_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vbool16_t test_vmslt_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
   return vmslt_vv_i8mf2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vbool16_t test_vmslt_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) {
   return vmslt_vx_i8mf2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vbool8_t test_vmslt_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) {
   return vmslt_vv_i8m1_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vbool8_t test_vmslt_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) {
   return vmslt_vx_i8m1_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vbool4_t test_vmslt_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
   return vmslt_vv_i8m2_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vbool4_t test_vmslt_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) {
   return vmslt_vx_i8m2_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vbool2_t test_vmslt_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) {
   return vmslt_vv_i8m4_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vbool2_t test_vmslt_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) {
   return vmslt_vx_i8m4_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmslt.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vbool1_t test_vmslt_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) {
   return vmslt_vv_i8m8_b1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmslt.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vbool1_t test_vmslt_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) {
   return vmslt_vx_i8m8_b1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -155,7 +140,6 @@ vbool64_t test_vmslt_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2,
   return vmslt_vv_i16mf4_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -165,7 +149,6 @@ vbool64_t test_vmslt_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) {
   return vmslt_vx_i16mf4_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -176,7 +159,6 @@ vbool32_t test_vmslt_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2,
   return vmslt_vv_i16mf2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -186,7 +168,6 @@ vbool32_t test_vmslt_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) {
   return vmslt_vx_i16mf2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -196,7 +177,6 @@ vbool16_t test_vmslt_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) {
   return vmslt_vv_i16m1_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -206,7 +186,6 @@ vbool16_t test_vmslt_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) {
   return vmslt_vx_i16m1_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -216,7 +195,6 @@ vbool8_t test_vmslt_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) {
   return vmslt_vv_i16m2_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -226,7 +204,6 @@ vbool8_t test_vmslt_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) {
   return vmslt_vx_i16m2_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -236,7 +213,6 @@ vbool4_t test_vmslt_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
   return vmslt_vv_i16m4_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -246,7 +222,6 @@ vbool4_t test_vmslt_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) {
   return vmslt_vx_i16m4_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -256,7 +231,6 @@ vbool2_t test_vmslt_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) {
   return vmslt_vv_i16m8_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -266,7 +240,6 @@ vbool2_t test_vmslt_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) {
   return vmslt_vx_i16m8_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -277,7 +250,6 @@ vbool64_t test_vmslt_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2,
   return vmslt_vv_i32mf2_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -287,7 +259,6 @@ vbool64_t test_vmslt_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) {
   return vmslt_vx_i32mf2_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -297,7 +268,6 @@ vbool32_t test_vmslt_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) {
   return vmslt_vv_i32m1_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -307,7 +277,6 @@ vbool32_t test_vmslt_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) {
   return vmslt_vx_i32m1_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -317,7 +286,6 @@ vbool16_t test_vmslt_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) {
   return vmslt_vv_i32m2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -327,7 +295,6 @@ vbool16_t test_vmslt_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) {
   return vmslt_vx_i32m2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -337,7 +304,6 @@ vbool8_t test_vmslt_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
   return vmslt_vv_i32m4_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -347,7 +313,6 @@ vbool8_t test_vmslt_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) {
   return vmslt_vx_i32m4_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -357,7 +322,6 @@ vbool4_t test_vmslt_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) {
   return vmslt_vv_i32m8_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -367,7 +331,6 @@ vbool4_t test_vmslt_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) {
   return vmslt_vx_i32m8_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -377,7 +340,6 @@ vbool64_t test_vmslt_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) {
   return vmslt_vv_i64m1_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -387,7 +349,6 @@ vbool64_t test_vmslt_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) {
   return vmslt_vx_i64m1_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -397,7 +358,6 @@ vbool32_t test_vmslt_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) {
   return vmslt_vv_i64m2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -407,7 +367,6 @@ vbool32_t test_vmslt_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) {
   return vmslt_vx_i64m2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -417,7 +376,6 @@ vbool16_t test_vmslt_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) {
   return vmslt_vv_i64m4_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -427,7 +385,6 @@ vbool16_t test_vmslt_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) {
   return vmslt_vx_i64m4_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -437,7 +394,6 @@ vbool8_t test_vmslt_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
   return vmslt_vv_i64m8_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -447,7 +403,6 @@ vbool8_t test_vmslt_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) {
   return vmslt_vx_i64m8_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -458,7 +413,6 @@ vbool64_t test_vmsltu_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2,
   return vmsltu_vv_u8mf8_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -468,7 +422,6 @@ vbool64_t test_vmsltu_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) {
   return vmsltu_vx_u8mf8_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -479,7 +432,6 @@ vbool32_t test_vmsltu_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2,
   return vmsltu_vv_u8mf4_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -489,7 +441,6 @@ vbool32_t test_vmsltu_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) {
   return vmsltu_vx_u8mf4_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -500,7 +451,6 @@ vbool16_t test_vmsltu_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2,
   return vmsltu_vv_u8mf2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -510,7 +460,6 @@ vbool16_t test_vmsltu_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) {
   return vmsltu_vx_u8mf2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -520,7 +469,6 @@ vbool8_t test_vmsltu_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vmsltu_vv_u8m1_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -530,7 +478,6 @@ vbool8_t test_vmsltu_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) {
   return vmsltu_vx_u8m1_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -540,7 +487,6 @@ vbool4_t test_vmsltu_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vmsltu_vv_u8m2_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -550,7 +496,6 @@ vbool4_t test_vmsltu_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) {
   return vmsltu_vx_u8m2_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -560,7 +505,6 @@ vbool2_t test_vmsltu_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vmsltu_vv_u8m4_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -570,7 +514,6 @@ vbool2_t test_vmsltu_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) {
   return vmsltu_vx_u8m4_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsltu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -580,7 +523,6 @@ vbool1_t test_vmsltu_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
   return vmsltu_vv_u8m8_b1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsltu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -590,7 +532,6 @@ vbool1_t test_vmsltu_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) {
   return vmsltu_vx_u8m8_b1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -601,7 +542,6 @@ vbool64_t test_vmsltu_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2,
   return vmsltu_vv_u16mf4_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -611,7 +551,6 @@ vbool64_t test_vmsltu_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) {
   return vmsltu_vx_u16mf4_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -622,7 +561,6 @@ vbool32_t test_vmsltu_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2,
   return vmsltu_vv_u16mf2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -632,7 +570,6 @@ vbool32_t test_vmsltu_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) {
   return vmsltu_vx_u16mf2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -643,7 +580,6 @@ vbool16_t test_vmsltu_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2,
   return vmsltu_vv_u16m1_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -653,7 +589,6 @@ vbool16_t test_vmsltu_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) {
   return vmsltu_vx_u16m1_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -663,7 +598,6 @@ vbool8_t test_vmsltu_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vmsltu_vv_u16m2_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -673,7 +607,6 @@ vbool8_t test_vmsltu_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) {
   return vmsltu_vx_u16m2_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -683,7 +616,6 @@ vbool4_t test_vmsltu_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vmsltu_vv_u16m4_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -693,7 +625,6 @@ vbool4_t test_vmsltu_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) {
   return vmsltu_vx_u16m4_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -703,7 +634,6 @@ vbool2_t test_vmsltu_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return vmsltu_vv_u16m8_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -713,7 +643,6 @@ vbool2_t test_vmsltu_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) {
   return vmsltu_vx_u16m8_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -724,7 +653,6 @@ vbool64_t test_vmsltu_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2,
   return vmsltu_vv_u32mf2_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -734,7 +662,6 @@ vbool64_t test_vmsltu_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) {
   return vmsltu_vx_u32mf2_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -745,7 +672,6 @@ vbool32_t test_vmsltu_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2,
   return vmsltu_vv_u32m1_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -755,7 +681,6 @@ vbool32_t test_vmsltu_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) {
   return vmsltu_vx_u32m1_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -766,7 +691,6 @@ vbool16_t test_vmsltu_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2,
   return vmsltu_vv_u32m2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -776,7 +700,6 @@ vbool16_t test_vmsltu_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vmsltu_vx_u32m2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -786,7 +709,6 @@ vbool8_t test_vmsltu_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vmsltu_vv_u32m4_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -796,7 +718,6 @@ vbool8_t test_vmsltu_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vmsltu_vx_u32m4_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -806,7 +727,6 @@ vbool4_t test_vmsltu_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
   return vmsltu_vv_u32m8_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -816,7 +736,6 @@ vbool4_t test_vmsltu_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) {
   return vmsltu_vx_u32m8_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -827,7 +746,6 @@ vbool64_t test_vmsltu_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2,
   return vmsltu_vv_u64m1_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -837,7 +755,6 @@ vbool64_t test_vmsltu_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) {
   return vmsltu_vx_u64m1_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -848,7 +765,6 @@ vbool32_t test_vmsltu_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2,
   return vmsltu_vv_u64m2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -858,7 +774,6 @@ vbool32_t test_vmsltu_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) {
   return vmsltu_vx_u64m2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -869,7 +784,6 @@ vbool16_t test_vmsltu_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2,
   return vmsltu_vv_u64m4_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -879,7 +793,6 @@ vbool16_t test_vmsltu_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) {
   return vmsltu_vx_u64m4_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -889,7 +802,6 @@ vbool8_t test_vmsltu_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
   return vmsltu_vv_u64m8_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -899,7 +811,6 @@ vbool8_t test_vmsltu_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) {
   return vmsltu_vx_u64m8_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i8mf8_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -910,7 +821,6 @@ vbool64_t test_vmslt_vv_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmslt_vv_i8mf8_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i8mf8_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8.i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -921,7 +831,6 @@ vbool64_t test_vmslt_vx_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmslt_vx_i8mf8_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i8mf4_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -932,7 +841,6 @@ vbool32_t test_vmslt_vv_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmslt_vv_i8mf4_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i8mf4_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8.i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -943,7 +851,6 @@ vbool32_t test_vmslt_vx_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmslt_vx_i8mf4_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i8mf2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -954,7 +861,6 @@ vbool16_t test_vmslt_vv_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmslt_vv_i8mf2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i8mf2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8.i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -965,7 +871,6 @@ vbool16_t test_vmslt_vx_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmslt_vx_i8mf2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i8m1_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -976,7 +881,6 @@ vbool8_t test_vmslt_vv_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmslt_vv_i8m1_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i8m1_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8.i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -987,7 +891,6 @@ vbool8_t test_vmslt_vx_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmslt_vx_i8m1_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i8m2_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -998,7 +901,6 @@ vbool4_t test_vmslt_vv_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmslt_vv_i8m2_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i8m2_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8.i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1009,7 +911,6 @@ vbool4_t test_vmslt_vx_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmslt_vx_i8m2_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i8m4_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1020,7 +921,6 @@ vbool2_t test_vmslt_vv_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmslt_vv_i8m4_b2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i8m4_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8.i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1031,7 +931,6 @@ vbool2_t test_vmslt_vx_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmslt_vx_i8m4_b2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i8m8_b1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmslt.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1042,7 +941,6 @@ vbool1_t test_vmslt_vv_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
   return vmslt_vv_i8m8_b1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i8m8_b1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmslt.mask.nxv64i8.i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1053,7 +951,6 @@ vbool1_t test_vmslt_vx_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
   return vmslt_vx_i8m8_b1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i16mf4_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1065,7 +962,6 @@ vbool64_t test_vmslt_vv_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmslt_vv_i16mf4_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i16mf4_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16.i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1076,7 +972,6 @@ vbool64_t test_vmslt_vx_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmslt_vx_i16mf4_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i16mf2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1088,7 +983,6 @@ vbool32_t test_vmslt_vv_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmslt_vv_i16mf2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i16mf2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16.i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1099,7 +993,6 @@ vbool32_t test_vmslt_vx_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmslt_vx_i16mf2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i16m1_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1110,7 +1003,6 @@ vbool16_t test_vmslt_vv_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmslt_vv_i16m1_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i16m1_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16.i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1121,7 +1013,6 @@ vbool16_t test_vmslt_vx_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmslt_vx_i16m1_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i16m2_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1132,7 +1023,6 @@ vbool8_t test_vmslt_vv_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmslt_vv_i16m2_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i16m2_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16.i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1143,7 +1033,6 @@ vbool8_t test_vmslt_vx_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmslt_vx_i16m2_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i16m4_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1154,7 +1043,6 @@ vbool4_t test_vmslt_vv_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmslt_vv_i16m4_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i16m4_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16.i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1165,7 +1053,6 @@ vbool4_t test_vmslt_vx_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmslt_vx_i16m4_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i16m8_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1176,7 +1063,6 @@ vbool2_t test_vmslt_vv_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmslt_vv_i16m8_b2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i16m8_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i16.i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1187,7 +1073,6 @@ vbool2_t test_vmslt_vx_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmslt_vx_i16m8_b2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1199,7 +1084,6 @@ vbool64_t test_vmslt_vv_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmslt_vv_i32mf2_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1210,7 +1094,6 @@ vbool64_t test_vmslt_vx_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmslt_vx_i32mf2_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1221,7 +1104,6 @@ vbool32_t test_vmslt_vv_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmslt_vv_i32m1_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32.i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1232,7 +1114,6 @@ vbool32_t test_vmslt_vx_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmslt_vx_i32m1_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1243,7 +1124,6 @@ vbool16_t test_vmslt_vv_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmslt_vv_i32m2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32.i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1254,7 +1134,6 @@ vbool16_t test_vmslt_vx_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmslt_vx_i32m2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1265,7 +1144,6 @@ vbool8_t test_vmslt_vv_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmslt_vv_i32m4_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32.i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1276,7 +1154,6 @@ vbool8_t test_vmslt_vx_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmslt_vx_i32m4_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1287,7 +1164,6 @@ vbool4_t test_vmslt_vv_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmslt_vv_i32m8_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i32.i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1298,7 +1174,6 @@ vbool4_t test_vmslt_vx_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmslt_vx_i32m8_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1309,7 +1184,6 @@ vbool64_t test_vmslt_vv_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmslt_vv_i64m1_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64.i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1320,7 +1194,6 @@ vbool64_t test_vmslt_vx_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmslt_vx_i64m1_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1331,7 +1204,6 @@ vbool32_t test_vmslt_vv_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmslt_vv_i64m2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64.i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1342,7 +1214,6 @@ vbool32_t test_vmslt_vx_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmslt_vx_i64m2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1353,7 +1224,6 @@ vbool16_t test_vmslt_vv_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmslt_vv_i64m4_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64.i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1364,7 +1234,6 @@ vbool16_t test_vmslt_vx_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmslt_vx_i64m4_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1375,7 +1244,6 @@ vbool8_t test_vmslt_vv_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmslt_vv_i64m8_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i64.i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1386,7 +1254,6 @@ vbool8_t test_vmslt_vx_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmslt_vx_i64m8_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u8mf8_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1398,7 +1265,6 @@ vbool64_t test_vmsltu_vv_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsltu_vv_u8mf8_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u8mf8_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8.i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1409,7 +1275,6 @@ vbool64_t test_vmsltu_vx_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsltu_vx_u8mf8_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u8mf4_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1421,7 +1286,6 @@ vbool32_t test_vmsltu_vv_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsltu_vv_u8mf4_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u8mf4_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8.i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1432,7 +1296,6 @@ vbool32_t test_vmsltu_vx_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsltu_vx_u8mf4_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u8mf2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1444,7 +1307,6 @@ vbool16_t test_vmsltu_vv_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsltu_vv_u8mf2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u8mf2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8.i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1455,7 +1317,6 @@ vbool16_t test_vmsltu_vx_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsltu_vx_u8mf2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u8m1_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1466,7 +1327,6 @@ vbool8_t test_vmsltu_vv_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsltu_vv_u8m1_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u8m1_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8.i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1477,7 +1337,6 @@ vbool8_t test_vmsltu_vx_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsltu_vx_u8m1_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u8m2_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1488,7 +1347,6 @@ vbool4_t test_vmsltu_vv_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmsltu_vv_u8m2_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u8m2_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8.i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1499,7 +1357,6 @@ vbool4_t test_vmsltu_vx_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmsltu_vx_u8m2_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u8m4_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1510,7 +1367,6 @@ vbool2_t test_vmsltu_vv_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmsltu_vv_u8m4_b2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u8m4_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8.i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1521,7 +1377,6 @@ vbool2_t test_vmsltu_vx_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmsltu_vx_u8m4_b2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u8m8_b1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsltu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1532,7 +1387,6 @@ vbool1_t test_vmsltu_vv_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
   return vmsltu_vv_u8m8_b1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u8m8_b1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsltu.mask.nxv64i8.i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1543,7 +1397,6 @@ vbool1_t test_vmsltu_vx_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
   return vmsltu_vx_u8m8_b1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u16mf4_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1555,7 +1408,6 @@ vbool64_t test_vmsltu_vv_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsltu_vv_u16mf4_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u16mf4_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16.i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1567,7 +1419,6 @@ vbool64_t test_vmsltu_vx_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsltu_vx_u16mf4_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u16mf2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1579,7 +1430,6 @@ vbool32_t test_vmsltu_vv_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsltu_vv_u16mf2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u16mf2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16.i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1591,7 +1441,6 @@ vbool32_t test_vmsltu_vx_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsltu_vx_u16mf2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u16m1_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1603,7 +1452,6 @@ vbool16_t test_vmsltu_vv_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsltu_vv_u16m1_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u16m1_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16.i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1614,7 +1462,6 @@ vbool16_t test_vmsltu_vx_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsltu_vx_u16m1_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u16m2_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1626,7 +1473,6 @@ vbool8_t test_vmsltu_vv_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsltu_vv_u16m2_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u16m2_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16.i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1637,7 +1483,6 @@ vbool8_t test_vmsltu_vx_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsltu_vx_u16m2_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u16m4_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1649,7 +1494,6 @@ vbool4_t test_vmsltu_vv_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmsltu_vv_u16m4_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u16m4_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16.i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1660,7 +1504,6 @@ vbool4_t test_vmsltu_vx_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmsltu_vx_u16m4_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u16m8_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1672,7 +1515,6 @@ vbool2_t test_vmsltu_vv_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmsltu_vv_u16m8_b2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u16m8_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i16.i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1683,7 +1525,6 @@ vbool2_t test_vmsltu_vx_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmsltu_vx_u16m8_b2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1695,7 +1536,6 @@ vbool64_t test_vmsltu_vv_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsltu_vv_u32mf2_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1707,7 +1547,6 @@ vbool64_t test_vmsltu_vx_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsltu_vx_u32mf2_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1719,7 +1558,6 @@ vbool32_t test_vmsltu_vv_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsltu_vv_u32m1_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32.i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1730,7 +1568,6 @@ vbool32_t test_vmsltu_vx_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsltu_vx_u32m1_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1742,7 +1579,6 @@ vbool16_t test_vmsltu_vv_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsltu_vv_u32m2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32.i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1753,7 +1589,6 @@ vbool16_t test_vmsltu_vx_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsltu_vx_u32m2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1765,7 +1600,6 @@ vbool8_t test_vmsltu_vv_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsltu_vv_u32m4_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32.i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1776,7 +1610,6 @@ vbool8_t test_vmsltu_vx_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsltu_vx_u32m4_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1788,7 +1621,6 @@ vbool4_t test_vmsltu_vv_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmsltu_vv_u32m8_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i32.i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1799,7 +1631,6 @@ vbool4_t test_vmsltu_vx_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmsltu_vx_u32m8_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1811,7 +1642,6 @@ vbool64_t test_vmsltu_vv_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsltu_vv_u64m1_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i64.i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1822,7 +1652,6 @@ vbool64_t test_vmsltu_vx_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsltu_vx_u64m1_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1834,7 +1663,6 @@ vbool32_t test_vmsltu_vv_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsltu_vv_u64m2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i64.i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1845,7 +1673,6 @@ vbool32_t test_vmsltu_vx_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsltu_vx_u64m2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1857,7 +1684,6 @@ vbool16_t test_vmsltu_vv_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsltu_vv_u64m4_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i64.i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1868,7 +1694,6 @@ vbool16_t test_vmsltu_vx_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsltu_vx_u64m4_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1880,7 +1705,6 @@ vbool8_t test_vmsltu_vv_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsltu_vv_u64m8_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i64.i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsne.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsne.c
index 51630c829da2e..7b830321cae81 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsne.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsne.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vbool64_t test_vmsne_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
   return vmsne_vv_i8mf8_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vbool64_t test_vmsne_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) {
   return vmsne_vx_i8mf8_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vbool32_t test_vmsne_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
   return vmsne_vv_i8mf4_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vbool32_t test_vmsne_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) {
   return vmsne_vx_i8mf4_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vbool16_t test_vmsne_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
   return vmsne_vv_i8mf2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vbool16_t test_vmsne_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) {
   return vmsne_vx_i8mf2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vbool8_t test_vmsne_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) {
   return vmsne_vv_i8m1_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vbool8_t test_vmsne_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) {
   return vmsne_vx_i8m1_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vbool4_t test_vmsne_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
   return vmsne_vv_i8m2_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vbool4_t test_vmsne_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) {
   return vmsne_vx_i8m2_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vbool2_t test_vmsne_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) {
   return vmsne_vv_i8m4_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vbool2_t test_vmsne_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) {
   return vmsne_vx_i8m4_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsne.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vbool1_t test_vmsne_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) {
   return vmsne_vv_i8m8_b1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsne.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vbool1_t test_vmsne_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) {
   return vmsne_vx_i8m8_b1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -155,7 +140,6 @@ vbool64_t test_vmsne_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2,
   return vmsne_vv_i16mf4_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -165,7 +149,6 @@ vbool64_t test_vmsne_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) {
   return vmsne_vx_i16mf4_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -176,7 +159,6 @@ vbool32_t test_vmsne_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2,
   return vmsne_vv_i16mf2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -186,7 +168,6 @@ vbool32_t test_vmsne_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) {
   return vmsne_vx_i16mf2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -196,7 +177,6 @@ vbool16_t test_vmsne_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) {
   return vmsne_vv_i16m1_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -206,7 +186,6 @@ vbool16_t test_vmsne_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) {
   return vmsne_vx_i16m1_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -216,7 +195,6 @@ vbool8_t test_vmsne_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) {
   return vmsne_vv_i16m2_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -226,7 +204,6 @@ vbool8_t test_vmsne_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) {
   return vmsne_vx_i16m2_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -236,7 +213,6 @@ vbool4_t test_vmsne_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
   return vmsne_vv_i16m4_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -246,7 +222,6 @@ vbool4_t test_vmsne_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) {
   return vmsne_vx_i16m4_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -256,7 +231,6 @@ vbool2_t test_vmsne_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) {
   return vmsne_vv_i16m8_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -266,7 +240,6 @@ vbool2_t test_vmsne_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) {
   return vmsne_vx_i16m8_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -277,7 +250,6 @@ vbool64_t test_vmsne_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2,
   return vmsne_vv_i32mf2_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -287,7 +259,6 @@ vbool64_t test_vmsne_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) {
   return vmsne_vx_i32mf2_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -297,7 +268,6 @@ vbool32_t test_vmsne_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) {
   return vmsne_vv_i32m1_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -307,7 +277,6 @@ vbool32_t test_vmsne_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) {
   return vmsne_vx_i32m1_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -317,7 +286,6 @@ vbool16_t test_vmsne_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) {
   return vmsne_vv_i32m2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -327,7 +295,6 @@ vbool16_t test_vmsne_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) {
   return vmsne_vx_i32m2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -337,7 +304,6 @@ vbool8_t test_vmsne_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
   return vmsne_vv_i32m4_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -347,7 +313,6 @@ vbool8_t test_vmsne_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) {
   return vmsne_vx_i32m4_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -357,7 +322,6 @@ vbool4_t test_vmsne_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) {
   return vmsne_vv_i32m8_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -367,7 +331,6 @@ vbool4_t test_vmsne_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) {
   return vmsne_vx_i32m8_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -377,7 +340,6 @@ vbool64_t test_vmsne_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) {
   return vmsne_vv_i64m1_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -387,7 +349,6 @@ vbool64_t test_vmsne_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) {
   return vmsne_vx_i64m1_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -397,7 +358,6 @@ vbool32_t test_vmsne_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) {
   return vmsne_vv_i64m2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -407,7 +367,6 @@ vbool32_t test_vmsne_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) {
   return vmsne_vx_i64m2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -417,7 +376,6 @@ vbool16_t test_vmsne_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) {
   return vmsne_vv_i64m4_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -427,7 +385,6 @@ vbool16_t test_vmsne_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) {
   return vmsne_vx_i64m4_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -437,7 +394,6 @@ vbool8_t test_vmsne_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
   return vmsne_vv_i64m8_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -447,7 +403,6 @@ vbool8_t test_vmsne_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) {
   return vmsne_vx_i64m8_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -457,7 +412,6 @@ vbool64_t test_vmsne_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
   return vmsne_vv_u8mf8_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -467,7 +421,6 @@ vbool64_t test_vmsne_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) {
   return vmsne_vx_u8mf8_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -477,7 +430,6 @@ vbool32_t test_vmsne_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
   return vmsne_vv_u8mf4_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -487,7 +439,6 @@ vbool32_t test_vmsne_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) {
   return vmsne_vx_u8mf4_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -497,7 +448,6 @@ vbool16_t test_vmsne_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
   return vmsne_vv_u8mf2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -507,7 +457,6 @@ vbool16_t test_vmsne_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) {
   return vmsne_vx_u8mf2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -517,7 +466,6 @@ vbool8_t test_vmsne_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vmsne_vv_u8m1_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -527,7 +475,6 @@ vbool8_t test_vmsne_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) {
   return vmsne_vx_u8m1_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -537,7 +484,6 @@ vbool4_t test_vmsne_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vmsne_vv_u8m2_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -547,7 +493,6 @@ vbool4_t test_vmsne_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) {
   return vmsne_vx_u8m2_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -557,7 +502,6 @@ vbool2_t test_vmsne_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vmsne_vv_u8m4_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -567,7 +511,6 @@ vbool2_t test_vmsne_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) {
   return vmsne_vx_u8m4_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsne.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -577,7 +520,6 @@ vbool1_t test_vmsne_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
   return vmsne_vv_u8m8_b1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsne.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -587,7 +529,6 @@ vbool1_t test_vmsne_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) {
   return vmsne_vx_u8m8_b1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -598,7 +539,6 @@ vbool64_t test_vmsne_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2,
   return vmsne_vv_u16mf4_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -608,7 +548,6 @@ vbool64_t test_vmsne_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) {
   return vmsne_vx_u16mf4_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -619,7 +558,6 @@ vbool32_t test_vmsne_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2,
   return vmsne_vv_u16mf2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -629,7 +567,6 @@ vbool32_t test_vmsne_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) {
   return vmsne_vx_u16mf2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -639,7 +576,6 @@ vbool16_t test_vmsne_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
   return vmsne_vv_u16m1_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -649,7 +585,6 @@ vbool16_t test_vmsne_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) {
   return vmsne_vx_u16m1_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -659,7 +594,6 @@ vbool8_t test_vmsne_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vmsne_vv_u16m2_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -669,7 +603,6 @@ vbool8_t test_vmsne_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) {
   return vmsne_vx_u16m2_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -679,7 +612,6 @@ vbool4_t test_vmsne_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vmsne_vv_u16m4_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -689,7 +621,6 @@ vbool4_t test_vmsne_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) {
   return vmsne_vx_u16m4_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -699,7 +630,6 @@ vbool2_t test_vmsne_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return vmsne_vv_u16m8_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -709,7 +639,6 @@ vbool2_t test_vmsne_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) {
   return vmsne_vx_u16m8_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -720,7 +649,6 @@ vbool64_t test_vmsne_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2,
   return vmsne_vv_u32mf2_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -730,7 +658,6 @@ vbool64_t test_vmsne_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) {
   return vmsne_vx_u32mf2_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -740,7 +667,6 @@ vbool32_t test_vmsne_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
   return vmsne_vv_u32m1_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -750,7 +676,6 @@ vbool32_t test_vmsne_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) {
   return vmsne_vx_u32m1_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -760,7 +685,6 @@ vbool16_t test_vmsne_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
   return vmsne_vv_u32m2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -770,7 +694,6 @@ vbool16_t test_vmsne_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vmsne_vx_u32m2_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -780,7 +703,6 @@ vbool8_t test_vmsne_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vmsne_vv_u32m4_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -790,7 +712,6 @@ vbool8_t test_vmsne_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vmsne_vx_u32m4_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -800,7 +721,6 @@ vbool4_t test_vmsne_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
   return vmsne_vv_u32m8_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -810,7 +730,6 @@ vbool4_t test_vmsne_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) {
   return vmsne_vx_u32m8_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -820,7 +739,6 @@ vbool64_t test_vmsne_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
   return vmsne_vv_u64m1_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -830,7 +748,6 @@ vbool64_t test_vmsne_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) {
   return vmsne_vx_u64m1_b64(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -840,7 +757,6 @@ vbool32_t test_vmsne_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
   return vmsne_vv_u64m2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -850,7 +766,6 @@ vbool32_t test_vmsne_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) {
   return vmsne_vx_u64m2_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -860,7 +775,6 @@ vbool16_t test_vmsne_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
   return vmsne_vv_u64m4_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -870,7 +784,6 @@ vbool16_t test_vmsne_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) {
   return vmsne_vx_u64m4_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -880,7 +793,6 @@ vbool8_t test_vmsne_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
   return vmsne_vv_u64m8_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -890,7 +802,6 @@ vbool8_t test_vmsne_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) {
   return vmsne_vx_u64m8_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i8mf8_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -901,7 +812,6 @@ vbool64_t test_vmsne_vv_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsne_vv_i8mf8_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i8mf8_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8.i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -912,7 +822,6 @@ vbool64_t test_vmsne_vx_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsne_vx_i8mf8_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i8mf4_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -923,7 +832,6 @@ vbool32_t test_vmsne_vv_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsne_vv_i8mf4_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i8mf4_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8.i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -934,7 +842,6 @@ vbool32_t test_vmsne_vx_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsne_vx_i8mf4_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i8mf2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -945,7 +852,6 @@ vbool16_t test_vmsne_vv_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsne_vv_i8mf2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i8mf2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8.i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -956,7 +862,6 @@ vbool16_t test_vmsne_vx_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsne_vx_i8mf2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i8m1_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -967,7 +872,6 @@ vbool8_t test_vmsne_vv_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsne_vv_i8m1_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i8m1_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8.i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -978,7 +882,6 @@ vbool8_t test_vmsne_vx_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsne_vx_i8m1_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i8m2_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -989,7 +892,6 @@ vbool4_t test_vmsne_vv_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmsne_vv_i8m2_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i8m2_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8.i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1000,7 +902,6 @@ vbool4_t test_vmsne_vx_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmsne_vx_i8m2_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i8m4_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1011,7 +912,6 @@ vbool2_t test_vmsne_vv_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmsne_vv_i8m4_b2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i8m4_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8.i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1022,7 +922,6 @@ vbool2_t test_vmsne_vx_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmsne_vx_i8m4_b2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i8m8_b1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsne.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1033,7 +932,6 @@ vbool1_t test_vmsne_vv_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
   return vmsne_vv_i8m8_b1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i8m8_b1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsne.mask.nxv64i8.i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1044,7 +942,6 @@ vbool1_t test_vmsne_vx_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
   return vmsne_vx_i8m8_b1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i16mf4_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1056,7 +953,6 @@ vbool64_t test_vmsne_vv_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsne_vv_i16mf4_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i16mf4_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16.i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1067,7 +963,6 @@ vbool64_t test_vmsne_vx_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsne_vx_i16mf4_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i16mf2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1079,7 +974,6 @@ vbool32_t test_vmsne_vv_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsne_vv_i16mf2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i16mf2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16.i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1090,7 +984,6 @@ vbool32_t test_vmsne_vx_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsne_vx_i16mf2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i16m1_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1101,7 +994,6 @@ vbool16_t test_vmsne_vv_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsne_vv_i16m1_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i16m1_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16.i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1112,7 +1004,6 @@ vbool16_t test_vmsne_vx_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsne_vx_i16m1_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i16m2_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1123,7 +1014,6 @@ vbool8_t test_vmsne_vv_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsne_vv_i16m2_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i16m2_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16.i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1134,7 +1024,6 @@ vbool8_t test_vmsne_vx_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsne_vx_i16m2_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i16m4_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1145,7 +1034,6 @@ vbool4_t test_vmsne_vv_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmsne_vv_i16m4_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i16m4_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16.i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1156,7 +1044,6 @@ vbool4_t test_vmsne_vx_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmsne_vx_i16m4_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i16m8_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1167,7 +1054,6 @@ vbool2_t test_vmsne_vv_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmsne_vv_i16m8_b2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i16m8_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i16.i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1178,7 +1064,6 @@ vbool2_t test_vmsne_vx_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmsne_vx_i16m8_b2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1190,7 +1075,6 @@ vbool64_t test_vmsne_vv_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsne_vv_i32mf2_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1201,7 +1085,6 @@ vbool64_t test_vmsne_vx_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsne_vx_i32mf2_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1212,7 +1095,6 @@ vbool32_t test_vmsne_vv_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsne_vv_i32m1_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32.i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1223,7 +1105,6 @@ vbool32_t test_vmsne_vx_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsne_vx_i32m1_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1234,7 +1115,6 @@ vbool16_t test_vmsne_vv_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsne_vv_i32m2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32.i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1245,7 +1125,6 @@ vbool16_t test_vmsne_vx_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsne_vx_i32m2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1256,7 +1135,6 @@ vbool8_t test_vmsne_vv_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsne_vv_i32m4_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32.i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1267,7 +1145,6 @@ vbool8_t test_vmsne_vx_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsne_vx_i32m4_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1278,7 +1155,6 @@ vbool4_t test_vmsne_vv_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmsne_vv_i32m8_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i32.i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1289,7 +1165,6 @@ vbool4_t test_vmsne_vx_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmsne_vx_i32m8_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1300,7 +1175,6 @@ vbool64_t test_vmsne_vv_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsne_vv_i64m1_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64.i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1311,7 +1185,6 @@ vbool64_t test_vmsne_vx_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsne_vx_i64m1_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1322,7 +1195,6 @@ vbool32_t test_vmsne_vv_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsne_vv_i64m2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64.i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1333,7 +1205,6 @@ vbool32_t test_vmsne_vx_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsne_vx_i64m2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1344,7 +1215,6 @@ vbool16_t test_vmsne_vv_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsne_vv_i64m4_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64.i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1355,7 +1225,6 @@ vbool16_t test_vmsne_vx_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsne_vx_i64m4_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1366,7 +1235,6 @@ vbool8_t test_vmsne_vv_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsne_vv_i64m8_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i64.i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1377,7 +1245,6 @@ vbool8_t test_vmsne_vx_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsne_vx_i64m8_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u8mf8_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1389,7 +1256,6 @@ vbool64_t test_vmsne_vv_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsne_vv_u8mf8_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u8mf8_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8.i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1400,7 +1266,6 @@ vbool64_t test_vmsne_vx_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsne_vx_u8mf8_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u8mf4_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1412,7 +1277,6 @@ vbool32_t test_vmsne_vv_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsne_vv_u8mf4_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u8mf4_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8.i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1423,7 +1287,6 @@ vbool32_t test_vmsne_vx_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsne_vx_u8mf4_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u8mf2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1435,7 +1298,6 @@ vbool16_t test_vmsne_vv_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsne_vv_u8mf2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u8mf2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8.i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1446,7 +1308,6 @@ vbool16_t test_vmsne_vx_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsne_vx_u8mf2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u8m1_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1457,7 +1318,6 @@ vbool8_t test_vmsne_vv_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsne_vv_u8m1_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u8m1_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8.i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1468,7 +1328,6 @@ vbool8_t test_vmsne_vx_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsne_vx_u8m1_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u8m2_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1479,7 +1338,6 @@ vbool4_t test_vmsne_vv_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmsne_vv_u8m2_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u8m2_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8.i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1490,7 +1348,6 @@ vbool4_t test_vmsne_vx_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmsne_vx_u8m2_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u8m4_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1501,7 +1358,6 @@ vbool2_t test_vmsne_vv_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmsne_vv_u8m4_b2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u8m4_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8.i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1512,7 +1368,6 @@ vbool2_t test_vmsne_vx_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmsne_vx_u8m4_b2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u8m8_b1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsne.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1523,7 +1378,6 @@ vbool1_t test_vmsne_vv_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
   return vmsne_vv_u8m8_b1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u8m8_b1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsne.mask.nxv64i8.i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1534,7 +1388,6 @@ vbool1_t test_vmsne_vx_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
   return vmsne_vx_u8m8_b1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u16mf4_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1546,7 +1399,6 @@ vbool64_t test_vmsne_vv_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsne_vv_u16mf4_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u16mf4_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16.i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1558,7 +1410,6 @@ vbool64_t test_vmsne_vx_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsne_vx_u16mf4_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u16mf2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1570,7 +1421,6 @@ vbool32_t test_vmsne_vv_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsne_vv_u16mf2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u16mf2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16.i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1582,7 +1432,6 @@ vbool32_t test_vmsne_vx_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsne_vx_u16mf2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u16m1_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1594,7 +1443,6 @@ vbool16_t test_vmsne_vv_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsne_vv_u16m1_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u16m1_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16.i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1605,7 +1453,6 @@ vbool16_t test_vmsne_vx_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsne_vx_u16m1_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u16m2_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1616,7 +1463,6 @@ vbool8_t test_vmsne_vv_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsne_vv_u16m2_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u16m2_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16.i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1627,7 +1473,6 @@ vbool8_t test_vmsne_vx_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsne_vx_u16m2_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u16m4_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1638,7 +1483,6 @@ vbool4_t test_vmsne_vv_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmsne_vv_u16m4_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u16m4_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16.i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1649,7 +1493,6 @@ vbool4_t test_vmsne_vx_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmsne_vx_u16m4_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u16m8_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1660,7 +1503,6 @@ vbool2_t test_vmsne_vv_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmsne_vv_u16m8_b2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u16m8_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i16.i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1671,7 +1513,6 @@ vbool2_t test_vmsne_vx_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmsne_vx_u16m8_b2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1683,7 +1524,6 @@ vbool64_t test_vmsne_vv_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsne_vv_u32mf2_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1695,7 +1535,6 @@ vbool64_t test_vmsne_vx_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsne_vx_u32mf2_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1707,7 +1546,6 @@ vbool32_t test_vmsne_vv_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsne_vv_u32m1_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32.i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1718,7 +1556,6 @@ vbool32_t test_vmsne_vx_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsne_vx_u32m1_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1730,7 +1567,6 @@ vbool16_t test_vmsne_vv_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsne_vv_u32m2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32.i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1741,7 +1577,6 @@ vbool16_t test_vmsne_vx_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsne_vx_u32m2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1752,7 +1587,6 @@ vbool8_t test_vmsne_vv_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsne_vv_u32m4_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32.i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1763,7 +1597,6 @@ vbool8_t test_vmsne_vx_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsne_vx_u32m4_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1774,7 +1607,6 @@ vbool4_t test_vmsne_vv_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmsne_vv_u32m8_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i32.i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1785,7 +1617,6 @@ vbool4_t test_vmsne_vx_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmsne_vx_u32m8_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1797,7 +1628,6 @@ vbool64_t test_vmsne_vv_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsne_vv_u64m1_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64.i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1808,7 +1638,6 @@ vbool64_t test_vmsne_vx_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsne_vx_u64m1_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1820,7 +1649,6 @@ vbool32_t test_vmsne_vv_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsne_vv_u64m2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64.i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1831,7 +1659,6 @@ vbool32_t test_vmsne_vx_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsne_vx_u64m2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1843,7 +1670,6 @@ vbool16_t test_vmsne_vv_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsne_vv_u64m4_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64.i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1854,7 +1680,6 @@ vbool16_t test_vmsne_vx_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsne_vx_u64m4_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1865,7 +1690,6 @@ vbool8_t test_vmsne_vv_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsne_vv_u64m8_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i64.i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsof.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsof.c
index 2771c14de739f..ac6e551cf883c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsof.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsof.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmsof_m_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsof.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vbool1_t test_vmsof_m_b1(vbool1_t op1, size_t vl) {
   return vmsof_m_b1(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsof_m_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsof.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vbool2_t test_vmsof_m_b2(vbool2_t op1, size_t vl) {
   return vmsof_m_b2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsof_m_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsof.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vbool4_t test_vmsof_m_b4(vbool4_t op1, size_t vl) {
   return vmsof_m_b4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsof_m_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsof.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vbool8_t test_vmsof_m_b8(vbool8_t op1, size_t vl) {
   return vmsof_m_b8(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsof_m_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsof.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vbool16_t test_vmsof_m_b16(vbool16_t op1, size_t vl) {
   return vmsof_m_b16(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsof_m_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsof.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vbool32_t test_vmsof_m_b32(vbool32_t op1, size_t vl) {
   return vmsof_m_b32(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsof_m_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsof.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vbool64_t test_vmsof_m_b64(vbool64_t op1, size_t vl) {
   return vmsof_m_b64(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsof_m_b1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsof.mask.nxv64i1.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -85,7 +77,6 @@ vbool1_t test_vmsof_m_b1_m(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1,
   return vmsof_m_b1_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsof_m_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsof.mask.nxv32i1.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -96,7 +87,6 @@ vbool2_t test_vmsof_m_b2_m(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1,
   return vmsof_m_b2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsof_m_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsof.mask.nxv16i1.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -107,7 +97,6 @@ vbool4_t test_vmsof_m_b4_m(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1,
   return vmsof_m_b4_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsof_m_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsof.mask.nxv8i1.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -118,7 +107,6 @@ vbool8_t test_vmsof_m_b8_m(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1,
   return vmsof_m_b8_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsof_m_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsof.mask.nxv4i1.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -129,7 +117,6 @@ vbool16_t test_vmsof_m_b16_m(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1,
   return vmsof_m_b16_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsof_m_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsof.mask.nxv2i1.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -140,7 +127,6 @@ vbool32_t test_vmsof_m_b32_m(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1,
   return vmsof_m_b32_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsof_m_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsof.mask.nxv1i1.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmul.c
index 232c6cbb5a59b..f7d7e8b0df180 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmul.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint8mf8_t test_vmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
   return vmul_vv_i8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint8mf8_t test_vmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
   return vmul_vx_i8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint8mf4_t test_vmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
   return vmul_vv_i8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint8mf4_t test_vmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
   return vmul_vx_i8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint8mf2_t test_vmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
   return vmul_vv_i8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint8mf2_t test_vmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
   return vmul_vx_i8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vint8m1_t test_vmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
   return vmul_vv_i8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vint8m1_t test_vmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
   return vmul_vx_i8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vint8m2_t test_vmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
   return vmul_vv_i8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vint8m2_t test_vmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
   return vmul_vx_i8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vint8m4_t test_vmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
   return vmul_vv_i8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vint8m4_t test_vmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
   return vmul_vx_i8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vint8m8_t test_vmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
   return vmul_vv_i8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vint8m8_t test_vmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
   return vmul_vx_i8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vint16mf4_t test_vmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
   return vmul_vv_i16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vint16mf4_t test_vmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
   return vmul_vx_i16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vint16mf2_t test_vmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
   return vmul_vv_i16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vint16mf2_t test_vmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
   return vmul_vx_i16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vint16m1_t test_vmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
   return vmul_vv_i16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vint16m1_t test_vmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
   return vmul_vx_i16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vint16m2_t test_vmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
   return vmul_vv_i16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vint16m2_t test_vmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
   return vmul_vx_i16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vint16m4_t test_vmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
   return vmul_vv_i16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vint16m4_t test_vmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
   return vmul_vx_i16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vint16m8_t test_vmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
   return vmul_vv_i16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vint16m8_t test_vmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
   return vmul_vx_i16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vint32mf2_t test_vmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
   return vmul_vv_i32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vint32mf2_t test_vmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
   return vmul_vx_i32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vint32m1_t test_vmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
   return vmul_vv_i32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -304,7 +274,6 @@ vint32m1_t test_vmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
   return vmul_vx_i32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -314,7 +283,6 @@ vint32m2_t test_vmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
   return vmul_vv_i32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -324,7 +292,6 @@ vint32m2_t test_vmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
   return vmul_vx_i32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -334,7 +301,6 @@ vint32m4_t test_vmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
   return vmul_vv_i32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -344,7 +310,6 @@ vint32m4_t test_vmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
   return vmul_vx_i32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -354,7 +319,6 @@ vint32m8_t test_vmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
   return vmul_vv_i32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -364,7 +328,6 @@ vint32m8_t test_vmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
   return vmul_vx_i32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -374,7 +337,6 @@ vint64m1_t test_vmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
   return vmul_vv_i64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -384,7 +346,6 @@ vint64m1_t test_vmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
   return vmul_vx_i64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -394,7 +355,6 @@ vint64m2_t test_vmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
   return vmul_vv_i64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -404,7 +364,6 @@ vint64m2_t test_vmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
   return vmul_vx_i64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -414,7 +373,6 @@ vint64m4_t test_vmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
   return vmul_vv_i64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -424,7 +382,6 @@ vint64m4_t test_vmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
   return vmul_vx_i64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -434,7 +391,6 @@ vint64m8_t test_vmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
   return vmul_vv_i64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -444,7 +400,6 @@ vint64m8_t test_vmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
   return vmul_vx_i64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -454,7 +409,6 @@ vuint8mf8_t test_vmul_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
   return vmul_vv_u8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -464,7 +418,6 @@ vuint8mf8_t test_vmul_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
   return vmul_vx_u8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -474,7 +427,6 @@ vuint8mf4_t test_vmul_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
   return vmul_vv_u8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -484,7 +436,6 @@ vuint8mf4_t test_vmul_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
   return vmul_vx_u8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -494,7 +445,6 @@ vuint8mf2_t test_vmul_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
   return vmul_vv_u8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -504,7 +454,6 @@ vuint8mf2_t test_vmul_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
   return vmul_vx_u8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -514,7 +463,6 @@ vuint8m1_t test_vmul_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vmul_vv_u8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -524,7 +472,6 @@ vuint8m1_t test_vmul_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
   return vmul_vx_u8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -534,7 +481,6 @@ vuint8m2_t test_vmul_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vmul_vv_u8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -544,7 +490,6 @@ vuint8m2_t test_vmul_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
   return vmul_vx_u8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -554,7 +499,6 @@ vuint8m4_t test_vmul_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vmul_vv_u8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -564,7 +508,6 @@ vuint8m4_t test_vmul_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
   return vmul_vx_u8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -574,7 +517,6 @@ vuint8m8_t test_vmul_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
   return vmul_vv_u8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -584,7 +526,6 @@ vuint8m8_t test_vmul_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
   return vmul_vx_u8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -594,7 +535,6 @@ vuint16mf4_t test_vmul_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
   return vmul_vv_u16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -604,7 +544,6 @@ vuint16mf4_t test_vmul_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
   return vmul_vx_u16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -614,7 +553,6 @@ vuint16mf2_t test_vmul_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
   return vmul_vv_u16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -624,7 +562,6 @@ vuint16mf2_t test_vmul_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
   return vmul_vx_u16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -634,7 +571,6 @@ vuint16m1_t test_vmul_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
   return vmul_vv_u16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -644,7 +580,6 @@ vuint16m1_t test_vmul_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
   return vmul_vx_u16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -654,7 +589,6 @@ vuint16m2_t test_vmul_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vmul_vv_u16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -664,7 +598,6 @@ vuint16m2_t test_vmul_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
   return vmul_vx_u16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -674,7 +607,6 @@ vuint16m4_t test_vmul_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vmul_vv_u16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -684,7 +616,6 @@ vuint16m4_t test_vmul_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
   return vmul_vx_u16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -694,7 +625,6 @@ vuint16m8_t test_vmul_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return vmul_vv_u16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -704,7 +634,6 @@ vuint16m8_t test_vmul_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
   return vmul_vx_u16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -714,7 +643,6 @@ vuint32mf2_t test_vmul_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
   return vmul_vv_u32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -724,7 +652,6 @@ vuint32mf2_t test_vmul_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
   return vmul_vx_u32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -734,7 +661,6 @@ vuint32m1_t test_vmul_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
   return vmul_vv_u32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -744,7 +670,6 @@ vuint32m1_t test_vmul_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
   return vmul_vx_u32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -754,7 +679,6 @@ vuint32m2_t test_vmul_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
   return vmul_vv_u32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -764,7 +688,6 @@ vuint32m2_t test_vmul_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vmul_vx_u32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -774,7 +697,6 @@ vuint32m4_t test_vmul_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vmul_vv_u32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -784,7 +706,6 @@ vuint32m4_t test_vmul_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vmul_vx_u32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -794,7 +715,6 @@ vuint32m8_t test_vmul_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
   return vmul_vv_u32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -804,7 +724,6 @@ vuint32m8_t test_vmul_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
   return vmul_vx_u32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -814,7 +733,6 @@ vuint64m1_t test_vmul_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
   return vmul_vv_u64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -824,7 +742,6 @@ vuint64m1_t test_vmul_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
   return vmul_vx_u64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -834,7 +751,6 @@ vuint64m2_t test_vmul_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
   return vmul_vv_u64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -844,7 +760,6 @@ vuint64m2_t test_vmul_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
   return vmul_vx_u64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -854,7 +769,6 @@ vuint64m4_t test_vmul_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
   return vmul_vv_u64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -864,7 +778,6 @@ vuint64m4_t test_vmul_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
   return vmul_vx_u64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -874,7 +787,6 @@ vuint64m8_t test_vmul_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
   return vmul_vv_u64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -884,7 +796,6 @@ vuint64m8_t test_vmul_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
   return vmul_vx_u64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -894,7 +805,6 @@ vint8mf8_t test_vmulh_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
   return vmulh_vv_i8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -904,7 +814,6 @@ vint8mf8_t test_vmulh_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
   return vmulh_vx_i8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -914,7 +823,6 @@ vint8mf4_t test_vmulh_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
   return vmulh_vv_i8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -924,7 +832,6 @@ vint8mf4_t test_vmulh_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
   return vmulh_vx_i8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -934,7 +841,6 @@ vint8mf2_t test_vmulh_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
   return vmulh_vv_i8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -944,7 +850,6 @@ vint8mf2_t test_vmulh_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
   return vmulh_vx_i8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -954,7 +859,6 @@ vint8m1_t test_vmulh_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
   return vmulh_vv_i8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -964,7 +868,6 @@ vint8m1_t test_vmulh_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
   return vmulh_vx_i8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -974,7 +877,6 @@ vint8m2_t test_vmulh_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
   return vmulh_vv_i8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -984,7 +886,6 @@ vint8m2_t test_vmulh_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
   return vmulh_vx_i8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -994,7 +895,6 @@ vint8m4_t test_vmulh_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
   return vmulh_vv_i8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1004,7 +904,6 @@ vint8m4_t test_vmulh_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
   return vmulh_vx_i8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1014,7 +913,6 @@ vint8m8_t test_vmulh_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
   return vmulh_vv_i8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1024,7 +922,6 @@ vint8m8_t test_vmulh_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
   return vmulh_vx_i8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1034,7 +931,6 @@ vint16mf4_t test_vmulh_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
   return vmulh_vv_i16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1044,7 +940,6 @@ vint16mf4_t test_vmulh_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
   return vmulh_vx_i16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1054,7 +949,6 @@ vint16mf2_t test_vmulh_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
   return vmulh_vv_i16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1064,7 +958,6 @@ vint16mf2_t test_vmulh_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
   return vmulh_vx_i16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1074,7 +967,6 @@ vint16m1_t test_vmulh_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
   return vmulh_vv_i16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1084,7 +976,6 @@ vint16m1_t test_vmulh_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
   return vmulh_vx_i16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1094,7 +985,6 @@ vint16m2_t test_vmulh_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
   return vmulh_vv_i16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1104,7 +994,6 @@ vint16m2_t test_vmulh_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
   return vmulh_vx_i16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1114,7 +1003,6 @@ vint16m4_t test_vmulh_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
   return vmulh_vv_i16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1124,7 +1012,6 @@ vint16m4_t test_vmulh_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
   return vmulh_vx_i16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1134,7 +1021,6 @@ vint16m8_t test_vmulh_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
   return vmulh_vv_i16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1144,7 +1030,6 @@ vint16m8_t test_vmulh_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
   return vmulh_vx_i16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1154,7 +1039,6 @@ vint32mf2_t test_vmulh_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
   return vmulh_vv_i32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1164,7 +1048,6 @@ vint32mf2_t test_vmulh_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
   return vmulh_vx_i32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1174,7 +1057,6 @@ vint32m1_t test_vmulh_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
   return vmulh_vv_i32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1184,7 +1066,6 @@ vint32m1_t test_vmulh_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
   return vmulh_vx_i32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1194,7 +1075,6 @@ vint32m2_t test_vmulh_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
   return vmulh_vv_i32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1204,7 +1084,6 @@ vint32m2_t test_vmulh_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
   return vmulh_vx_i32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1214,7 +1093,6 @@ vint32m4_t test_vmulh_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
   return vmulh_vv_i32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1224,7 +1102,6 @@ vint32m4_t test_vmulh_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
   return vmulh_vx_i32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1234,7 +1111,6 @@ vint32m8_t test_vmulh_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
   return vmulh_vv_i32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1244,7 +1120,6 @@ vint32m8_t test_vmulh_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
   return vmulh_vx_i32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1254,7 +1129,6 @@ vint64m1_t test_vmulh_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
   return vmulh_vv_i64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1264,7 +1138,6 @@ vint64m1_t test_vmulh_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
   return vmulh_vx_i64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1274,7 +1147,6 @@ vint64m2_t test_vmulh_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
   return vmulh_vv_i64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1284,7 +1156,6 @@ vint64m2_t test_vmulh_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
   return vmulh_vx_i64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1294,7 +1165,6 @@ vint64m4_t test_vmulh_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
   return vmulh_vv_i64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1304,7 +1174,6 @@ vint64m4_t test_vmulh_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
   return vmulh_vx_i64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1314,7 +1183,6 @@ vint64m8_t test_vmulh_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
   return vmulh_vv_i64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1324,7 +1192,6 @@ vint64m8_t test_vmulh_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
   return vmulh_vx_i64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1334,7 +1201,6 @@ vuint8mf8_t test_vmulhu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
   return vmulhu_vv_u8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1344,7 +1210,6 @@ vuint8mf8_t test_vmulhu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
   return vmulhu_vx_u8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1354,7 +1219,6 @@ vuint8mf4_t test_vmulhu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
   return vmulhu_vv_u8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1364,7 +1228,6 @@ vuint8mf4_t test_vmulhu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
   return vmulhu_vx_u8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1374,7 +1237,6 @@ vuint8mf2_t test_vmulhu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
   return vmulhu_vv_u8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1384,7 +1246,6 @@ vuint8mf2_t test_vmulhu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
   return vmulhu_vx_u8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1394,7 +1255,6 @@ vuint8m1_t test_vmulhu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vmulhu_vv_u8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1404,7 +1264,6 @@ vuint8m1_t test_vmulhu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
   return vmulhu_vx_u8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1414,7 +1273,6 @@ vuint8m2_t test_vmulhu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vmulhu_vv_u8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1424,7 +1282,6 @@ vuint8m2_t test_vmulhu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
   return vmulhu_vx_u8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1434,7 +1291,6 @@ vuint8m4_t test_vmulhu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vmulhu_vv_u8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1444,7 +1300,6 @@ vuint8m4_t test_vmulhu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
   return vmulhu_vx_u8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1454,7 +1309,6 @@ vuint8m8_t test_vmulhu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
   return vmulhu_vv_u8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1464,7 +1318,6 @@ vuint8m8_t test_vmulhu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
   return vmulhu_vx_u8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1474,7 +1327,6 @@ vuint16mf4_t test_vmulhu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl
   return vmulhu_vv_u16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1484,7 +1336,6 @@ vuint16mf4_t test_vmulhu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
   return vmulhu_vx_u16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1494,7 +1345,6 @@ vuint16mf2_t test_vmulhu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl
   return vmulhu_vv_u16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1504,7 +1354,6 @@ vuint16mf2_t test_vmulhu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
   return vmulhu_vx_u16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1514,7 +1363,6 @@ vuint16m1_t test_vmulhu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
   return vmulhu_vv_u16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1524,7 +1372,6 @@ vuint16m1_t test_vmulhu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
   return vmulhu_vx_u16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1534,7 +1381,6 @@ vuint16m2_t test_vmulhu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vmulhu_vv_u16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1544,7 +1390,6 @@ vuint16m2_t test_vmulhu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
   return vmulhu_vx_u16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1554,7 +1399,6 @@ vuint16m4_t test_vmulhu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vmulhu_vv_u16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1564,7 +1408,6 @@ vuint16m4_t test_vmulhu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
   return vmulhu_vx_u16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1574,7 +1417,6 @@ vuint16m8_t test_vmulhu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return vmulhu_vv_u16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1584,7 +1426,6 @@ vuint16m8_t test_vmulhu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
   return vmulhu_vx_u16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1594,7 +1435,6 @@ vuint32mf2_t test_vmulhu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl
   return vmulhu_vv_u32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1604,7 +1444,6 @@ vuint32mf2_t test_vmulhu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
   return vmulhu_vx_u32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1614,7 +1453,6 @@ vuint32m1_t test_vmulhu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
   return vmulhu_vv_u32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1624,7 +1462,6 @@ vuint32m1_t test_vmulhu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
   return vmulhu_vx_u32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1634,7 +1471,6 @@ vuint32m2_t test_vmulhu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
   return vmulhu_vv_u32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1644,7 +1480,6 @@ vuint32m2_t test_vmulhu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vmulhu_vx_u32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1654,7 +1489,6 @@ vuint32m4_t test_vmulhu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vmulhu_vv_u32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1664,7 +1498,6 @@ vuint32m4_t test_vmulhu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vmulhu_vx_u32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1674,7 +1507,6 @@ vuint32m8_t test_vmulhu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
   return vmulhu_vv_u32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1684,7 +1516,6 @@ vuint32m8_t test_vmulhu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
   return vmulhu_vx_u32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1694,7 +1525,6 @@ vuint64m1_t test_vmulhu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
   return vmulhu_vv_u64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1704,7 +1534,6 @@ vuint64m1_t test_vmulhu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
   return vmulhu_vx_u64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1714,7 +1543,6 @@ vuint64m2_t test_vmulhu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
   return vmulhu_vv_u64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1724,7 +1552,6 @@ vuint64m2_t test_vmulhu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
   return vmulhu_vx_u64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1734,7 +1561,6 @@ vuint64m4_t test_vmulhu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
   return vmulhu_vv_u64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1744,7 +1570,6 @@ vuint64m4_t test_vmulhu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
   return vmulhu_vx_u64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1754,7 +1579,6 @@ vuint64m8_t test_vmulhu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
   return vmulhu_vv_u64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1764,7 +1588,6 @@ vuint64m8_t test_vmulhu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
   return vmulhu_vx_u64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1774,7 +1597,6 @@ vint8mf8_t test_vmulhsu_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
   return vmulhsu_vv_i8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1784,7 +1606,6 @@ vint8mf8_t test_vmulhsu_vx_i8mf8(vint8mf8_t op1, uint8_t op2, size_t vl) {
   return vmulhsu_vx_i8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1794,7 +1615,6 @@ vint8mf4_t test_vmulhsu_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
   return vmulhsu_vv_i8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1804,7 +1624,6 @@ vint8mf4_t test_vmulhsu_vx_i8mf4(vint8mf4_t op1, uint8_t op2, size_t vl) {
   return vmulhsu_vx_i8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1814,7 +1633,6 @@ vint8mf2_t test_vmulhsu_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
   return vmulhsu_vv_i8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1824,7 +1642,6 @@ vint8mf2_t test_vmulhsu_vx_i8mf2(vint8mf2_t op1, uint8_t op2, size_t vl) {
   return vmulhsu_vx_i8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1834,7 +1651,6 @@ vint8m1_t test_vmulhsu_vv_i8m1(vint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vmulhsu_vv_i8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1844,7 +1660,6 @@ vint8m1_t test_vmulhsu_vx_i8m1(vint8m1_t op1, uint8_t op2, size_t vl) {
   return vmulhsu_vx_i8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1854,7 +1669,6 @@ vint8m2_t test_vmulhsu_vv_i8m2(vint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vmulhsu_vv_i8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1864,7 +1678,6 @@ vint8m2_t test_vmulhsu_vx_i8m2(vint8m2_t op1, uint8_t op2, size_t vl) {
   return vmulhsu_vx_i8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1874,7 +1687,6 @@ vint8m4_t test_vmulhsu_vv_i8m4(vint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vmulhsu_vv_i8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1884,7 +1696,6 @@ vint8m4_t test_vmulhsu_vx_i8m4(vint8m4_t op1, uint8_t op2, size_t vl) {
   return vmulhsu_vx_i8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1894,7 +1705,6 @@ vint8m8_t test_vmulhsu_vv_i8m8(vint8m8_t op1, vuint8m8_t op2, size_t vl) {
   return vmulhsu_vv_i8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1904,7 +1714,6 @@ vint8m8_t test_vmulhsu_vx_i8m8(vint8m8_t op1, uint8_t op2, size_t vl) {
   return vmulhsu_vx_i8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1914,7 +1723,6 @@ vint16mf4_t test_vmulhsu_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2, size_t vl)
   return vmulhsu_vv_i16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1924,7 +1732,6 @@ vint16mf4_t test_vmulhsu_vx_i16mf4(vint16mf4_t op1, uint16_t op2, size_t vl) {
   return vmulhsu_vx_i16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1934,7 +1741,6 @@ vint16mf2_t test_vmulhsu_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2, size_t vl)
   return vmulhsu_vv_i16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1944,7 +1750,6 @@ vint16mf2_t test_vmulhsu_vx_i16mf2(vint16mf2_t op1, uint16_t op2, size_t vl) {
   return vmulhsu_vx_i16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1954,7 +1759,6 @@ vint16m1_t test_vmulhsu_vv_i16m1(vint16m1_t op1, vuint16m1_t op2, size_t vl) {
   return vmulhsu_vv_i16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1964,7 +1768,6 @@ vint16m1_t test_vmulhsu_vx_i16m1(vint16m1_t op1, uint16_t op2, size_t vl) {
   return vmulhsu_vx_i16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1974,7 +1777,6 @@ vint16m2_t test_vmulhsu_vv_i16m2(vint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vmulhsu_vv_i16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1984,7 +1786,6 @@ vint16m2_t test_vmulhsu_vx_i16m2(vint16m2_t op1, uint16_t op2, size_t vl) {
   return vmulhsu_vx_i16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1994,7 +1795,6 @@ vint16m4_t test_vmulhsu_vv_i16m4(vint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vmulhsu_vv_i16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -2004,7 +1804,6 @@ vint16m4_t test_vmulhsu_vx_i16m4(vint16m4_t op1, uint16_t op2, size_t vl) {
   return vmulhsu_vx_i16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -2014,7 +1813,6 @@ vint16m8_t test_vmulhsu_vv_i16m8(vint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return vmulhsu_vv_i16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -2024,7 +1822,6 @@ vint16m8_t test_vmulhsu_vx_i16m8(vint16m8_t op1, uint16_t op2, size_t vl) {
   return vmulhsu_vx_i16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -2034,7 +1831,6 @@ vint32mf2_t test_vmulhsu_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t op2, size_t vl)
   return vmulhsu_vv_i32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -2044,7 +1840,6 @@ vint32mf2_t test_vmulhsu_vx_i32mf2(vint32mf2_t op1, uint32_t op2, size_t vl) {
   return vmulhsu_vx_i32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -2054,7 +1849,6 @@ vint32m1_t test_vmulhsu_vv_i32m1(vint32m1_t op1, vuint32m1_t op2, size_t vl) {
   return vmulhsu_vv_i32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -2064,7 +1858,6 @@ vint32m1_t test_vmulhsu_vx_i32m1(vint32m1_t op1, uint32_t op2, size_t vl) {
   return vmulhsu_vx_i32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -2074,7 +1867,6 @@ vint32m2_t test_vmulhsu_vv_i32m2(vint32m2_t op1, vuint32m2_t op2, size_t vl) {
   return vmulhsu_vv_i32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -2084,7 +1876,6 @@ vint32m2_t test_vmulhsu_vx_i32m2(vint32m2_t op1, uint32_t op2, size_t vl) {
   return vmulhsu_vx_i32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -2094,7 +1885,6 @@ vint32m4_t test_vmulhsu_vv_i32m4(vint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vmulhsu_vv_i32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -2104,7 +1894,6 @@ vint32m4_t test_vmulhsu_vx_i32m4(vint32m4_t op1, uint32_t op2, size_t vl) {
   return vmulhsu_vx_i32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -2114,7 +1903,6 @@ vint32m8_t test_vmulhsu_vv_i32m8(vint32m8_t op1, vuint32m8_t op2, size_t vl) {
   return vmulhsu_vv_i32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -2124,7 +1912,6 @@ vint32m8_t test_vmulhsu_vx_i32m8(vint32m8_t op1, uint32_t op2, size_t vl) {
   return vmulhsu_vx_i32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -2134,7 +1921,6 @@ vint64m1_t test_vmulhsu_vv_i64m1(vint64m1_t op1, vuint64m1_t op2, size_t vl) {
   return vmulhsu_vv_i64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -2144,7 +1930,6 @@ vint64m1_t test_vmulhsu_vx_i64m1(vint64m1_t op1, uint64_t op2, size_t vl) {
   return vmulhsu_vx_i64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -2154,7 +1939,6 @@ vint64m2_t test_vmulhsu_vv_i64m2(vint64m2_t op1, vuint64m2_t op2, size_t vl) {
   return vmulhsu_vv_i64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -2164,7 +1948,6 @@ vint64m2_t test_vmulhsu_vx_i64m2(vint64m2_t op1, uint64_t op2, size_t vl) {
   return vmulhsu_vx_i64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -2174,7 +1957,6 @@ vint64m4_t test_vmulhsu_vv_i64m4(vint64m4_t op1, vuint64m4_t op2, size_t vl) {
   return vmulhsu_vv_i64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -2184,7 +1966,6 @@ vint64m4_t test_vmulhsu_vx_i64m4(vint64m4_t op1, uint64_t op2, size_t vl) {
   return vmulhsu_vx_i64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -2194,7 +1975,6 @@ vint64m8_t test_vmulhsu_vv_i64m8(vint64m8_t op1, vuint64m8_t op2, size_t vl) {
   return vmulhsu_vv_i64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -2204,7 +1984,6 @@ vint64m8_t test_vmulhsu_vx_i64m8(vint64m8_t op1, uint64_t op2, size_t vl) {
   return vmulhsu_vx_i64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2214,7 +1993,6 @@ vint8mf8_t test_vmul_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t
   return vmul_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2224,7 +2002,6 @@ vint8mf8_t test_vmul_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t
   return vmul_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2234,7 +2011,6 @@ vint8mf4_t test_vmul_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t
   return vmul_vv_i8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2244,7 +2020,6 @@ vint8mf4_t test_vmul_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t
   return vmul_vx_i8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2254,7 +2029,6 @@ vint8mf2_t test_vmul_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t
   return vmul_vv_i8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2264,7 +2038,6 @@ vint8mf2_t test_vmul_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t
   return vmul_vx_i8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2274,7 +2047,6 @@ vint8m1_t test_vmul_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1,
   return vmul_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2284,7 +2056,6 @@ vint8m1_t test_vmul_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1,
   return vmul_vx_i8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2294,7 +2065,6 @@ vint8m2_t test_vmul_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1,
   return vmul_vv_i8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2304,7 +2074,6 @@ vint8m2_t test_vmul_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1,
   return vmul_vx_i8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2314,7 +2083,6 @@ vint8m4_t test_vmul_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1,
   return vmul_vv_i8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2324,7 +2092,6 @@ vint8m4_t test_vmul_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1,
   return vmul_vx_i8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2334,7 +2101,6 @@ vint8m8_t test_vmul_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1,
   return vmul_vv_i8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2344,7 +2110,6 @@ vint8m8_t test_vmul_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1,
   return vmul_vx_i8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2354,7 +2119,6 @@ vint16mf4_t test_vmul_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16m
   return vmul_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2364,7 +2128,6 @@ vint16mf4_t test_vmul_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16m
   return vmul_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2374,7 +2137,6 @@ vint16mf2_t test_vmul_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16m
   return vmul_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2384,7 +2146,6 @@ vint16mf2_t test_vmul_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16m
   return vmul_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2394,7 +2155,6 @@ vint16m1_t test_vmul_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t
   return vmul_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2404,7 +2164,6 @@ vint16m1_t test_vmul_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t
   return vmul_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2414,7 +2173,6 @@ vint16m2_t test_vmul_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
   return vmul_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2424,7 +2182,6 @@ vint16m2_t test_vmul_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
   return vmul_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2434,7 +2191,6 @@ vint16m4_t test_vmul_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
   return vmul_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2444,7 +2200,6 @@ vint16m4_t test_vmul_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
   return vmul_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2454,7 +2209,6 @@ vint16m8_t test_vmul_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
   return vmul_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2464,7 +2218,6 @@ vint16m8_t test_vmul_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
   return vmul_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2474,7 +2227,6 @@ vint32mf2_t test_vmul_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32m
   return vmul_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2484,7 +2236,6 @@ vint32mf2_t test_vmul_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32m
   return vmul_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2494,7 +2245,6 @@ vint32m1_t test_vmul_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t
   return vmul_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2504,7 +2254,6 @@ vint32m1_t test_vmul_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t
   return vmul_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2514,7 +2263,6 @@ vint32m2_t test_vmul_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t
   return vmul_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2524,7 +2272,6 @@ vint32m2_t test_vmul_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t
   return vmul_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2534,7 +2281,6 @@ vint32m4_t test_vmul_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
   return vmul_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2544,7 +2290,6 @@ vint32m4_t test_vmul_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
   return vmul_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2554,7 +2299,6 @@ vint32m8_t test_vmul_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
   return vmul_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2564,7 +2308,6 @@ vint32m8_t test_vmul_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
   return vmul_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2574,7 +2317,6 @@ vint64m1_t test_vmul_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t
   return vmul_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2584,7 +2326,6 @@ vint64m1_t test_vmul_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t
   return vmul_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2594,7 +2335,6 @@ vint64m2_t test_vmul_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t
   return vmul_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2604,7 +2344,6 @@ vint64m2_t test_vmul_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t
   return vmul_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2614,7 +2353,6 @@ vint64m4_t test_vmul_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t
   return vmul_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2624,7 +2362,6 @@ vint64m4_t test_vmul_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t
   return vmul_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2634,7 +2371,6 @@ vint64m8_t test_vmul_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
   return vmul_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2644,7 +2380,6 @@ vint64m8_t test_vmul_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
   return vmul_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2654,7 +2389,6 @@ vuint8mf8_t test_vmul_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf
   return vmul_vv_u8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2664,7 +2398,6 @@ vuint8mf8_t test_vmul_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf
   return vmul_vx_u8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2674,7 +2407,6 @@ vuint8mf4_t test_vmul_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf
   return vmul_vv_u8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2684,7 +2416,6 @@ vuint8mf4_t test_vmul_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf
   return vmul_vx_u8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2694,7 +2425,6 @@ vuint8mf2_t test_vmul_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf
   return vmul_vv_u8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2704,7 +2434,6 @@ vuint8mf2_t test_vmul_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf
   return vmul_vx_u8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2714,7 +2443,6 @@ vuint8m1_t test_vmul_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t o
   return vmul_vv_u8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2724,7 +2452,6 @@ vuint8m1_t test_vmul_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t o
   return vmul_vx_u8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2734,7 +2461,6 @@ vuint8m2_t test_vmul_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t o
   return vmul_vv_u8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2744,7 +2470,6 @@ vuint8m2_t test_vmul_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t o
   return vmul_vx_u8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2754,7 +2479,6 @@ vuint8m4_t test_vmul_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t o
   return vmul_vv_u8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2764,7 +2488,6 @@ vuint8m4_t test_vmul_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t o
   return vmul_vx_u8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2774,7 +2497,6 @@ vuint8m8_t test_vmul_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t o
   return vmul_vv_u8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2784,7 +2506,6 @@ vuint8m8_t test_vmul_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t o
   return vmul_vx_u8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2794,7 +2515,6 @@ vuint16mf4_t test_vmul_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint
   return vmul_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2804,7 +2524,6 @@ vuint16mf4_t test_vmul_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint
   return vmul_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2814,7 +2533,6 @@ vuint16mf2_t test_vmul_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint
   return vmul_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2824,7 +2542,6 @@ vuint16mf2_t test_vmul_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint
   return vmul_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2834,7 +2551,6 @@ vuint16m1_t test_vmul_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m
   return vmul_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2844,7 +2560,6 @@ vuint16m1_t test_vmul_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m
   return vmul_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2854,7 +2569,6 @@ vuint16m2_t test_vmul_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2
   return vmul_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2864,7 +2578,6 @@ vuint16m2_t test_vmul_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2
   return vmul_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2874,7 +2587,6 @@ vuint16m4_t test_vmul_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4
   return vmul_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2884,7 +2596,6 @@ vuint16m4_t test_vmul_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4
   return vmul_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2894,7 +2605,6 @@ vuint16m8_t test_vmul_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8
   return vmul_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2904,7 +2614,6 @@ vuint16m8_t test_vmul_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8
   return vmul_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2914,7 +2623,6 @@ vuint32mf2_t test_vmul_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint
   return vmul_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2924,7 +2632,6 @@ vuint32mf2_t test_vmul_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint
   return vmul_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2934,7 +2641,6 @@ vuint32m1_t test_vmul_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m
   return vmul_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2944,7 +2650,6 @@ vuint32m1_t test_vmul_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m
   return vmul_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2954,7 +2659,6 @@ vuint32m2_t test_vmul_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m
   return vmul_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2964,7 +2668,6 @@ vuint32m2_t test_vmul_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m
   return vmul_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2974,7 +2677,6 @@ vuint32m4_t test_vmul_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4
   return vmul_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2984,7 +2686,6 @@ vuint32m4_t test_vmul_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4
   return vmul_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2994,7 +2695,6 @@ vuint32m8_t test_vmul_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8
   return vmul_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3004,7 +2704,6 @@ vuint32m8_t test_vmul_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8
   return vmul_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3014,7 +2713,6 @@ vuint64m1_t test_vmul_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m
   return vmul_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3024,7 +2722,6 @@ vuint64m1_t test_vmul_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m
   return vmul_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3034,7 +2731,6 @@ vuint64m2_t test_vmul_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m
   return vmul_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3044,7 +2740,6 @@ vuint64m2_t test_vmul_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m
   return vmul_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3054,7 +2749,6 @@ vuint64m4_t test_vmul_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m
   return vmul_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3064,7 +2758,6 @@ vuint64m4_t test_vmul_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m
   return vmul_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3074,7 +2767,6 @@ vuint64m8_t test_vmul_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8
   return vmul_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3084,7 +2776,6 @@ vuint64m8_t test_vmul_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8
   return vmul_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3094,7 +2785,6 @@ vint8mf8_t test_vmulh_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_
   return vmulh_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3104,7 +2794,6 @@ vint8mf8_t test_vmulh_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_
   return vmulh_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3114,7 +2803,6 @@ vint8mf4_t test_vmulh_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_
   return vmulh_vv_i8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3124,7 +2812,6 @@ vint8mf4_t test_vmulh_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_
   return vmulh_vx_i8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3134,7 +2821,6 @@ vint8mf2_t test_vmulh_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_
   return vmulh_vv_i8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3144,7 +2830,6 @@ vint8mf2_t test_vmulh_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_
   return vmulh_vx_i8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3154,7 +2839,6 @@ vint8m1_t test_vmulh_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1
   return vmulh_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3164,7 +2848,6 @@ vint8m1_t test_vmulh_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1
   return vmulh_vx_i8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3174,7 +2857,6 @@ vint8m2_t test_vmulh_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1
   return vmulh_vv_i8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3184,7 +2866,6 @@ vint8m2_t test_vmulh_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1
   return vmulh_vx_i8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3194,7 +2875,6 @@ vint8m4_t test_vmulh_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1
   return vmulh_vv_i8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3204,7 +2884,6 @@ vint8m4_t test_vmulh_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1
   return vmulh_vx_i8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3214,7 +2893,6 @@ vint8m8_t test_vmulh_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1
   return vmulh_vv_i8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3224,7 +2902,6 @@ vint8m8_t test_vmulh_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1
   return vmulh_vx_i8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3234,7 +2911,6 @@ vint16mf4_t test_vmulh_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16
   return vmulh_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3244,7 +2920,6 @@ vint16mf4_t test_vmulh_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16
   return vmulh_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3254,7 +2929,6 @@ vint16mf2_t test_vmulh_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16
   return vmulh_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3264,7 +2938,6 @@ vint16mf2_t test_vmulh_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16
   return vmulh_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3274,7 +2947,6 @@ vint16m1_t test_vmulh_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_
   return vmulh_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3284,7 +2956,6 @@ vint16m1_t test_vmulh_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_
   return vmulh_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3294,7 +2965,6 @@ vint16m2_t test_vmulh_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
   return vmulh_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3304,7 +2974,6 @@ vint16m2_t test_vmulh_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
   return vmulh_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3314,7 +2983,6 @@ vint16m4_t test_vmulh_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
   return vmulh_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3324,7 +2992,6 @@ vint16m4_t test_vmulh_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
   return vmulh_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3334,7 +3001,6 @@ vint16m8_t test_vmulh_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
   return vmulh_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3344,7 +3010,6 @@ vint16m8_t test_vmulh_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
   return vmulh_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3354,7 +3019,6 @@ vint32mf2_t test_vmulh_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32
   return vmulh_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3364,7 +3028,6 @@ vint32mf2_t test_vmulh_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32
   return vmulh_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulh.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3374,7 +3037,6 @@ vint32m1_t test_vmulh_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_
   return vmulh_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulh.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3384,7 +3046,6 @@ vint32m1_t test_vmulh_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_
   return vmulh_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulh.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3394,7 +3055,6 @@ vint32m2_t test_vmulh_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_
   return vmulh_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulh.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3404,7 +3064,6 @@ vint32m2_t test_vmulh_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_
   return vmulh_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulh.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3414,7 +3073,6 @@ vint32m4_t test_vmulh_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
   return vmulh_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulh.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3424,7 +3082,6 @@ vint32m4_t test_vmulh_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
   return vmulh_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulh.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3434,7 +3091,6 @@ vint32m8_t test_vmulh_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
   return vmulh_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulh.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3444,7 +3100,6 @@ vint32m8_t test_vmulh_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
   return vmulh_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3454,7 +3109,6 @@ vint64m1_t test_vmulh_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_
   return vmulh_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3464,7 +3118,6 @@ vint64m1_t test_vmulh_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_
   return vmulh_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3474,7 +3127,6 @@ vint64m2_t test_vmulh_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_
   return vmulh_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3484,7 +3136,6 @@ vint64m2_t test_vmulh_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_
   return vmulh_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3494,7 +3145,6 @@ vint64m4_t test_vmulh_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_
   return vmulh_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3504,7 +3154,6 @@ vint64m4_t test_vmulh_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_
   return vmulh_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3514,7 +3163,6 @@ vint64m8_t test_vmulh_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
   return vmulh_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3524,7 +3172,6 @@ vint64m8_t test_vmulh_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
   return vmulh_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3534,7 +3181,6 @@ vuint8mf8_t test_vmulhu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8
   return vmulhu_vv_u8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3544,7 +3190,6 @@ vuint8mf8_t test_vmulhu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8
   return vmulhu_vx_u8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3554,7 +3199,6 @@ vuint8mf4_t test_vmulhu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8
   return vmulhu_vv_u8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3564,7 +3208,6 @@ vuint8mf4_t test_vmulhu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8
   return vmulhu_vx_u8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3574,7 +3217,6 @@ vuint8mf2_t test_vmulhu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8
   return vmulhu_vv_u8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3584,7 +3226,6 @@ vuint8mf2_t test_vmulhu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8
   return vmulhu_vx_u8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3594,7 +3235,6 @@ vuint8m1_t test_vmulhu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
   return vmulhu_vv_u8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3604,7 +3244,6 @@ vuint8m1_t test_vmulhu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
   return vmulhu_vx_u8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3614,7 +3253,6 @@ vuint8m2_t test_vmulhu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
   return vmulhu_vv_u8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3624,7 +3262,6 @@ vuint8m2_t test_vmulhu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
   return vmulhu_vx_u8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3634,7 +3271,6 @@ vuint8m4_t test_vmulhu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
   return vmulhu_vv_u8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3644,7 +3280,6 @@ vuint8m4_t test_vmulhu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
   return vmulhu_vx_u8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3654,7 +3289,6 @@ vuint8m8_t test_vmulhu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
   return vmulhu_vv_u8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3664,7 +3298,6 @@ vuint8m8_t test_vmulhu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
   return vmulhu_vx_u8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3674,7 +3307,6 @@ vuint16mf4_t test_vmulhu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vui
   return vmulhu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhu.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3684,7 +3316,6 @@ vuint16mf4_t test_vmulhu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vui
   return vmulhu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3694,7 +3325,6 @@ vuint16mf2_t test_vmulhu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vui
   return vmulhu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3704,7 +3334,6 @@ vuint16mf2_t test_vmulhu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vui
   return vmulhu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3714,7 +3343,6 @@ vuint16m1_t test_vmulhu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint1
   return vmulhu_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3724,7 +3352,6 @@ vuint16m1_t test_vmulhu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint1
   return vmulhu_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3734,7 +3361,6 @@ vuint16m2_t test_vmulhu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16
   return vmulhu_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3744,7 +3370,6 @@ vuint16m2_t test_vmulhu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16
   return vmulhu_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3754,7 +3379,6 @@ vuint16m4_t test_vmulhu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16
   return vmulhu_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3764,7 +3388,6 @@ vuint16m4_t test_vmulhu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16
   return vmulhu_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3774,7 +3397,6 @@ vuint16m8_t test_vmulhu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16
   return vmulhu_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3784,7 +3406,6 @@ vuint16m8_t test_vmulhu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16
   return vmulhu_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3794,7 +3415,6 @@ vuint32mf2_t test_vmulhu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vui
   return vmulhu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3804,7 +3424,6 @@ vuint32mf2_t test_vmulhu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vui
   return vmulhu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3814,7 +3433,6 @@ vuint32m1_t test_vmulhu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint3
   return vmulhu_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3824,7 +3442,6 @@ vuint32m1_t test_vmulhu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint3
   return vmulhu_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3834,7 +3451,6 @@ vuint32m2_t test_vmulhu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint3
   return vmulhu_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3844,7 +3460,6 @@ vuint32m2_t test_vmulhu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint3
   return vmulhu_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3854,7 +3469,6 @@ vuint32m4_t test_vmulhu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32
   return vmulhu_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3864,7 +3478,6 @@ vuint32m4_t test_vmulhu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32
   return vmulhu_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3874,7 +3487,6 @@ vuint32m8_t test_vmulhu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32
   return vmulhu_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3884,7 +3496,6 @@ vuint32m8_t test_vmulhu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32
   return vmulhu_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3894,7 +3505,6 @@ vuint64m1_t test_vmulhu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint6
   return vmulhu_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3904,7 +3514,6 @@ vuint64m1_t test_vmulhu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint6
   return vmulhu_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3914,7 +3523,6 @@ vuint64m2_t test_vmulhu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint6
   return vmulhu_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3924,7 +3532,6 @@ vuint64m2_t test_vmulhu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint6
   return vmulhu_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3934,7 +3541,6 @@ vuint64m4_t test_vmulhu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint6
   return vmulhu_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3944,7 +3550,6 @@ vuint64m4_t test_vmulhu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint6
   return vmulhu_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3954,7 +3559,6 @@ vuint64m8_t test_vmulhu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64
   return vmulhu_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3964,7 +3568,6 @@ vuint64m8_t test_vmulhu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64
   return vmulhu_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3974,7 +3577,6 @@ vint8mf8_t test_vmulhsu_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf
   return vmulhsu_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3984,7 +3586,6 @@ vint8mf8_t test_vmulhsu_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf
   return vmulhsu_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3994,7 +3595,6 @@ vint8mf4_t test_vmulhsu_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf
   return vmulhsu_vv_i8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -4004,7 +3604,6 @@ vint8mf4_t test_vmulhsu_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf
   return vmulhsu_vx_i8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -4014,7 +3613,6 @@ vint8mf2_t test_vmulhsu_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf
   return vmulhsu_vv_i8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -4024,7 +3622,6 @@ vint8mf2_t test_vmulhsu_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf
   return vmulhsu_vx_i8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -4034,7 +3631,6 @@ vint8m1_t test_vmulhsu_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o
   return vmulhsu_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -4044,7 +3640,6 @@ vint8m1_t test_vmulhsu_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o
   return vmulhsu_vx_i8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -4054,7 +3649,6 @@ vint8m2_t test_vmulhsu_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o
   return vmulhsu_vv_i8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -4064,7 +3658,6 @@ vint8m2_t test_vmulhsu_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o
   return vmulhsu_vx_i8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -4074,7 +3667,6 @@ vint8m4_t test_vmulhsu_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o
   return vmulhsu_vv_i8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -4084,7 +3676,6 @@ vint8m4_t test_vmulhsu_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o
   return vmulhsu_vx_i8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -4094,7 +3685,6 @@ vint8m8_t test_vmulhsu_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o
   return vmulhsu_vv_i8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -4104,7 +3694,6 @@ vint8m8_t test_vmulhsu_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o
   return vmulhsu_vx_i8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -4114,7 +3703,6 @@ vint16mf4_t test_vmulhsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint
   return vmulhsu_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -4124,7 +3712,6 @@ vint16mf4_t test_vmulhsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint
   return vmulhsu_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -4134,7 +3721,6 @@ vint16mf2_t test_vmulhsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint
   return vmulhsu_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -4144,7 +3730,6 @@ vint16mf2_t test_vmulhsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint
   return vmulhsu_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -4154,7 +3739,6 @@ vint16m1_t test_vmulhsu_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m
   return vmulhsu_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -4164,7 +3748,6 @@ vint16m1_t test_vmulhsu_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m
   return vmulhsu_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -4174,7 +3757,6 @@ vint16m2_t test_vmulhsu_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2
   return vmulhsu_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -4184,7 +3766,6 @@ vint16m2_t test_vmulhsu_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2
   return vmulhsu_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -4194,7 +3775,6 @@ vint16m4_t test_vmulhsu_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4
   return vmulhsu_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -4204,7 +3784,6 @@ vint16m4_t test_vmulhsu_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4
   return vmulhsu_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -4214,7 +3793,6 @@ vint16m8_t test_vmulhsu_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8
   return vmulhsu_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -4224,7 +3802,6 @@ vint16m8_t test_vmulhsu_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8
   return vmulhsu_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -4234,7 +3811,6 @@ vint32mf2_t test_vmulhsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint
   return vmulhsu_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -4244,7 +3820,6 @@ vint32mf2_t test_vmulhsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint
   return vmulhsu_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -4254,7 +3829,6 @@ vint32m1_t test_vmulhsu_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m
   return vmulhsu_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -4264,7 +3838,6 @@ vint32m1_t test_vmulhsu_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m
   return vmulhsu_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -4274,7 +3847,6 @@ vint32m2_t test_vmulhsu_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m
   return vmulhsu_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -4284,7 +3856,6 @@ vint32m2_t test_vmulhsu_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m
   return vmulhsu_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -4294,7 +3865,6 @@ vint32m4_t test_vmulhsu_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4
   return vmulhsu_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -4304,7 +3874,6 @@ vint32m4_t test_vmulhsu_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4
   return vmulhsu_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -4314,7 +3883,6 @@ vint32m8_t test_vmulhsu_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8
   return vmulhsu_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -4324,7 +3892,6 @@ vint32m8_t test_vmulhsu_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8
   return vmulhsu_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -4334,7 +3901,6 @@ vint64m1_t test_vmulhsu_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m
   return vmulhsu_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -4344,7 +3910,6 @@ vint64m1_t test_vmulhsu_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m
   return vmulhsu_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -4354,7 +3919,6 @@ vint64m2_t test_vmulhsu_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m
   return vmulhsu_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -4364,7 +3928,6 @@ vint64m2_t test_vmulhsu_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m
   return vmulhsu_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -4374,7 +3937,6 @@ vint64m4_t test_vmulhsu_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m
   return vmulhsu_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -4384,7 +3946,6 @@ vint64m4_t test_vmulhsu_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m
   return vmulhsu_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -4394,7 +3955,6 @@ vint64m8_t test_vmulhsu_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8
   return vmulhsu_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmv.c
index 4b6c038c629d5..de0ea1494012d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmv.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmv.v.v.nxv1i8.i64(<vscale x 1 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vint8mf8_t test_vmv_v_v_i8mf8(vint8mf8_t src, size_t vl) {
   return vmv_v_v_i8mf8(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_x_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmv.v.x.nxv1i8.i64(i8 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -25,7 +23,6 @@ vint8mf8_t test_vmv_v_x_i8mf8(int8_t src, size_t vl) {
   return vmv_v_x_i8mf8(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmv.v.v.nxv2i8.i64(<vscale x 2 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -35,7 +32,6 @@ vint8mf4_t test_vmv_v_v_i8mf4(vint8mf4_t src, size_t vl) {
   return vmv_v_v_i8mf4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_x_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmv.v.x.nxv2i8.i64(i8 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -45,7 +41,6 @@ vint8mf4_t test_vmv_v_x_i8mf4(int8_t src, size_t vl) {
   return vmv_v_x_i8mf4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmv.v.v.nxv4i8.i64(<vscale x 4 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -55,7 +50,6 @@ vint8mf2_t test_vmv_v_v_i8mf2(vint8mf2_t src, size_t vl) {
   return vmv_v_v_i8mf2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_x_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmv.v.x.nxv4i8.i64(i8 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -65,7 +59,6 @@ vint8mf2_t test_vmv_v_x_i8mf2(int8_t src, size_t vl) {
   return vmv_v_x_i8mf2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmv.v.v.nxv8i8.i64(<vscale x 8 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -75,7 +68,6 @@ vint8m1_t test_vmv_v_v_i8m1(vint8m1_t src, size_t vl) {
   return vmv_v_v_i8m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_x_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmv.v.x.nxv8i8.i64(i8 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -85,7 +77,6 @@ vint8m1_t test_vmv_v_x_i8m1(int8_t src, size_t vl) {
   return vmv_v_x_i8m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmv.v.v.nxv16i8.i64(<vscale x 16 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -95,7 +86,6 @@ vint8m2_t test_vmv_v_v_i8m2(vint8m2_t src, size_t vl) {
   return vmv_v_v_i8m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_x_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmv.v.x.nxv16i8.i64(i8 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -105,7 +95,6 @@ vint8m2_t test_vmv_v_x_i8m2(int8_t src, size_t vl) {
   return vmv_v_x_i8m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmv.v.v.nxv32i8.i64(<vscale x 32 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -115,7 +104,6 @@ vint8m4_t test_vmv_v_v_i8m4(vint8m4_t src, size_t vl) {
   return vmv_v_v_i8m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_x_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmv.v.x.nxv32i8.i64(i8 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -125,7 +113,6 @@ vint8m4_t test_vmv_v_x_i8m4(int8_t src, size_t vl) {
   return vmv_v_x_i8m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmv.v.v.nxv64i8.i64(<vscale x 64 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -135,7 +122,6 @@ vint8m8_t test_vmv_v_v_i8m8(vint8m8_t src, size_t vl) {
   return vmv_v_v_i8m8(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_x_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmv.v.x.nxv64i8.i64(i8 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -145,7 +131,6 @@ vint8m8_t test_vmv_v_x_i8m8(int8_t src, size_t vl) {
   return vmv_v_x_i8m8(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmv.v.v.nxv1i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -155,7 +140,6 @@ vint16mf4_t test_vmv_v_v_i16mf4(vint16mf4_t src, size_t vl) {
   return vmv_v_v_i16mf4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_x_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmv.v.x.nxv1i16.i64(i16 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -165,7 +149,6 @@ vint16mf4_t test_vmv_v_x_i16mf4(int16_t src, size_t vl) {
   return vmv_v_x_i16mf4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmv.v.v.nxv2i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -175,7 +158,6 @@ vint16mf2_t test_vmv_v_v_i16mf2(vint16mf2_t src, size_t vl) {
   return vmv_v_v_i16mf2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_x_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmv.v.x.nxv2i16.i64(i16 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -185,7 +167,6 @@ vint16mf2_t test_vmv_v_x_i16mf2(int16_t src, size_t vl) {
   return vmv_v_x_i16mf2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmv.v.v.nxv4i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -195,7 +176,6 @@ vint16m1_t test_vmv_v_v_i16m1(vint16m1_t src, size_t vl) {
   return vmv_v_v_i16m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_x_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmv.v.x.nxv4i16.i64(i16 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -205,7 +185,6 @@ vint16m1_t test_vmv_v_x_i16m1(int16_t src, size_t vl) {
   return vmv_v_x_i16m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmv.v.v.nxv8i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -215,7 +194,6 @@ vint16m2_t test_vmv_v_v_i16m2(vint16m2_t src, size_t vl) {
   return vmv_v_v_i16m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_x_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmv.v.x.nxv8i16.i64(i16 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -225,7 +203,6 @@ vint16m2_t test_vmv_v_x_i16m2(int16_t src, size_t vl) {
   return vmv_v_x_i16m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmv.v.v.nxv16i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -235,7 +212,6 @@ vint16m4_t test_vmv_v_v_i16m4(vint16m4_t src, size_t vl) {
   return vmv_v_v_i16m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_x_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmv.v.x.nxv16i16.i64(i16 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -245,7 +221,6 @@ vint16m4_t test_vmv_v_x_i16m4(int16_t src, size_t vl) {
   return vmv_v_x_i16m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmv.v.v.nxv32i16.i64(<vscale x 32 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -255,7 +230,6 @@ vint16m8_t test_vmv_v_v_i16m8(vint16m8_t src, size_t vl) {
   return vmv_v_v_i16m8(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_x_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmv.v.x.nxv32i16.i64(i16 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -265,7 +239,6 @@ vint16m8_t test_vmv_v_x_i16m8(int16_t src, size_t vl) {
   return vmv_v_x_i16m8(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmv.v.v.nxv1i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -275,7 +248,6 @@ vint32mf2_t test_vmv_v_v_i32mf2(vint32mf2_t src, size_t vl) {
   return vmv_v_v_i32mf2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_x_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmv.v.x.nxv1i32.i64(i32 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -285,7 +257,6 @@ vint32mf2_t test_vmv_v_x_i32mf2(int32_t src, size_t vl) {
   return vmv_v_x_i32mf2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmv.v.v.nxv2i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -295,7 +266,6 @@ vint32m1_t test_vmv_v_v_i32m1(vint32m1_t src, size_t vl) {
   return vmv_v_v_i32m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_x_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmv.v.x.nxv2i32.i64(i32 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -305,7 +275,6 @@ vint32m1_t test_vmv_v_x_i32m1(int32_t src, size_t vl) {
   return vmv_v_x_i32m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmv.v.v.nxv4i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -315,7 +284,6 @@ vint32m2_t test_vmv_v_v_i32m2(vint32m2_t src, size_t vl) {
   return vmv_v_v_i32m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_x_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmv.v.x.nxv4i32.i64(i32 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -325,7 +293,6 @@ vint32m2_t test_vmv_v_x_i32m2(int32_t src, size_t vl) {
   return vmv_v_x_i32m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmv.v.v.nxv8i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -335,7 +302,6 @@ vint32m4_t test_vmv_v_v_i32m4(vint32m4_t src, size_t vl) {
   return vmv_v_v_i32m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_x_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmv.v.x.nxv8i32.i64(i32 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -345,7 +311,6 @@ vint32m4_t test_vmv_v_x_i32m4(int32_t src, size_t vl) {
   return vmv_v_x_i32m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmv.v.v.nxv16i32.i64(<vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -355,7 +320,6 @@ vint32m8_t test_vmv_v_v_i32m8(vint32m8_t src, size_t vl) {
   return vmv_v_v_i32m8(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_x_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmv.v.x.nxv16i32.i64(i32 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -365,7 +329,6 @@ vint32m8_t test_vmv_v_x_i32m8(int32_t src, size_t vl) {
   return vmv_v_x_i32m8(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmv.v.v.nxv1i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -375,7 +338,6 @@ vint64m1_t test_vmv_v_v_i64m1(vint64m1_t src, size_t vl) {
   return vmv_v_v_i64m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_x_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmv.v.x.nxv1i64.i64(i64 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -385,7 +347,6 @@ vint64m1_t test_vmv_v_x_i64m1(int64_t src, size_t vl) {
   return vmv_v_x_i64m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmv.v.v.nxv2i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -395,7 +356,6 @@ vint64m2_t test_vmv_v_v_i64m2(vint64m2_t src, size_t vl) {
   return vmv_v_v_i64m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_x_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmv.v.x.nxv2i64.i64(i64 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -405,7 +365,6 @@ vint64m2_t test_vmv_v_x_i64m2(int64_t src, size_t vl) {
   return vmv_v_x_i64m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmv.v.v.nxv4i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -415,7 +374,6 @@ vint64m4_t test_vmv_v_v_i64m4(vint64m4_t src, size_t vl) {
   return vmv_v_v_i64m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_x_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmv.v.x.nxv4i64.i64(i64 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -425,7 +383,6 @@ vint64m4_t test_vmv_v_x_i64m4(int64_t src, size_t vl) {
   return vmv_v_x_i64m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmv.v.v.nxv8i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -435,7 +392,6 @@ vint64m8_t test_vmv_v_v_i64m8(vint64m8_t src, size_t vl) {
   return vmv_v_v_i64m8(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_x_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmv.v.x.nxv8i64.i64(i64 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -445,7 +401,6 @@ vint64m8_t test_vmv_v_x_i64m8(int64_t src, size_t vl) {
   return vmv_v_x_i64m8(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmv.v.v.nxv1i8.i64(<vscale x 1 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -455,7 +410,6 @@ vuint8mf8_t test_vmv_v_v_u8mf8(vuint8mf8_t src, size_t vl) {
   return vmv_v_v_u8mf8(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_x_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmv.v.x.nxv1i8.i64(i8 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -465,7 +419,6 @@ vuint8mf8_t test_vmv_v_x_u8mf8(uint8_t src, size_t vl) {
   return vmv_v_x_u8mf8(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmv.v.v.nxv2i8.i64(<vscale x 2 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -475,7 +428,6 @@ vuint8mf4_t test_vmv_v_v_u8mf4(vuint8mf4_t src, size_t vl) {
   return vmv_v_v_u8mf4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_x_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmv.v.x.nxv2i8.i64(i8 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -485,7 +437,6 @@ vuint8mf4_t test_vmv_v_x_u8mf4(uint8_t src, size_t vl) {
   return vmv_v_x_u8mf4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmv.v.v.nxv4i8.i64(<vscale x 4 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -495,7 +446,6 @@ vuint8mf2_t test_vmv_v_v_u8mf2(vuint8mf2_t src, size_t vl) {
   return vmv_v_v_u8mf2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_x_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmv.v.x.nxv4i8.i64(i8 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -505,7 +455,6 @@ vuint8mf2_t test_vmv_v_x_u8mf2(uint8_t src, size_t vl) {
   return vmv_v_x_u8mf2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmv.v.v.nxv8i8.i64(<vscale x 8 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -515,7 +464,6 @@ vuint8m1_t test_vmv_v_v_u8m1(vuint8m1_t src, size_t vl) {
   return vmv_v_v_u8m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_x_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmv.v.x.nxv8i8.i64(i8 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -525,7 +473,6 @@ vuint8m1_t test_vmv_v_x_u8m1(uint8_t src, size_t vl) {
   return vmv_v_x_u8m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmv.v.v.nxv16i8.i64(<vscale x 16 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -535,7 +482,6 @@ vuint8m2_t test_vmv_v_v_u8m2(vuint8m2_t src, size_t vl) {
   return vmv_v_v_u8m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_x_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmv.v.x.nxv16i8.i64(i8 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -545,7 +491,6 @@ vuint8m2_t test_vmv_v_x_u8m2(uint8_t src, size_t vl) {
   return vmv_v_x_u8m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmv.v.v.nxv32i8.i64(<vscale x 32 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -555,7 +500,6 @@ vuint8m4_t test_vmv_v_v_u8m4(vuint8m4_t src, size_t vl) {
   return vmv_v_v_u8m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_x_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmv.v.x.nxv32i8.i64(i8 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -565,7 +509,6 @@ vuint8m4_t test_vmv_v_x_u8m4(uint8_t src, size_t vl) {
   return vmv_v_x_u8m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmv.v.v.nxv64i8.i64(<vscale x 64 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -575,7 +518,6 @@ vuint8m8_t test_vmv_v_v_u8m8(vuint8m8_t src, size_t vl) {
   return vmv_v_v_u8m8(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_x_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmv.v.x.nxv64i8.i64(i8 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -585,7 +527,6 @@ vuint8m8_t test_vmv_v_x_u8m8(uint8_t src, size_t vl) {
   return vmv_v_x_u8m8(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmv.v.v.nxv1i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -595,7 +536,6 @@ vuint16mf4_t test_vmv_v_v_u16mf4(vuint16mf4_t src, size_t vl) {
   return vmv_v_v_u16mf4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_x_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmv.v.x.nxv1i16.i64(i16 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -605,7 +545,6 @@ vuint16mf4_t test_vmv_v_x_u16mf4(uint16_t src, size_t vl) {
   return vmv_v_x_u16mf4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmv.v.v.nxv2i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -615,7 +554,6 @@ vuint16mf2_t test_vmv_v_v_u16mf2(vuint16mf2_t src, size_t vl) {
   return vmv_v_v_u16mf2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_x_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmv.v.x.nxv2i16.i64(i16 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -625,7 +563,6 @@ vuint16mf2_t test_vmv_v_x_u16mf2(uint16_t src, size_t vl) {
   return vmv_v_x_u16mf2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmv.v.v.nxv4i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -635,7 +572,6 @@ vuint16m1_t test_vmv_v_v_u16m1(vuint16m1_t src, size_t vl) {
   return vmv_v_v_u16m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_x_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmv.v.x.nxv4i16.i64(i16 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -645,7 +581,6 @@ vuint16m1_t test_vmv_v_x_u16m1(uint16_t src, size_t vl) {
   return vmv_v_x_u16m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmv.v.v.nxv8i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -655,7 +590,6 @@ vuint16m2_t test_vmv_v_v_u16m2(vuint16m2_t src, size_t vl) {
   return vmv_v_v_u16m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_x_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmv.v.x.nxv8i16.i64(i16 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -665,7 +599,6 @@ vuint16m2_t test_vmv_v_x_u16m2(uint16_t src, size_t vl) {
   return vmv_v_x_u16m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmv.v.v.nxv16i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -675,7 +608,6 @@ vuint16m4_t test_vmv_v_v_u16m4(vuint16m4_t src, size_t vl) {
   return vmv_v_v_u16m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_x_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmv.v.x.nxv16i16.i64(i16 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -685,7 +617,6 @@ vuint16m4_t test_vmv_v_x_u16m4(uint16_t src, size_t vl) {
   return vmv_v_x_u16m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmv.v.v.nxv32i16.i64(<vscale x 32 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -695,7 +626,6 @@ vuint16m8_t test_vmv_v_v_u16m8(vuint16m8_t src, size_t vl) {
   return vmv_v_v_u16m8(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_x_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmv.v.x.nxv32i16.i64(i16 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -705,7 +635,6 @@ vuint16m8_t test_vmv_v_x_u16m8(uint16_t src, size_t vl) {
   return vmv_v_x_u16m8(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmv.v.v.nxv1i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -715,7 +644,6 @@ vuint32mf2_t test_vmv_v_v_u32mf2(vuint32mf2_t src, size_t vl) {
   return vmv_v_v_u32mf2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_x_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmv.v.x.nxv1i32.i64(i32 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -725,7 +653,6 @@ vuint32mf2_t test_vmv_v_x_u32mf2(uint32_t src, size_t vl) {
   return vmv_v_x_u32mf2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmv.v.v.nxv2i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -735,7 +662,6 @@ vuint32m1_t test_vmv_v_v_u32m1(vuint32m1_t src, size_t vl) {
   return vmv_v_v_u32m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_x_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmv.v.x.nxv2i32.i64(i32 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -745,7 +671,6 @@ vuint32m1_t test_vmv_v_x_u32m1(uint32_t src, size_t vl) {
   return vmv_v_x_u32m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmv.v.v.nxv4i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -755,7 +680,6 @@ vuint32m2_t test_vmv_v_v_u32m2(vuint32m2_t src, size_t vl) {
   return vmv_v_v_u32m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_x_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmv.v.x.nxv4i32.i64(i32 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -765,7 +689,6 @@ vuint32m2_t test_vmv_v_x_u32m2(uint32_t src, size_t vl) {
   return vmv_v_x_u32m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmv.v.v.nxv8i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -775,7 +698,6 @@ vuint32m4_t test_vmv_v_v_u32m4(vuint32m4_t src, size_t vl) {
   return vmv_v_v_u32m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_x_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmv.v.x.nxv8i32.i64(i32 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -785,7 +707,6 @@ vuint32m4_t test_vmv_v_x_u32m4(uint32_t src, size_t vl) {
   return vmv_v_x_u32m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmv.v.v.nxv16i32.i64(<vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -795,7 +716,6 @@ vuint32m8_t test_vmv_v_v_u32m8(vuint32m8_t src, size_t vl) {
   return vmv_v_v_u32m8(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_x_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmv.v.x.nxv16i32.i64(i32 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -805,7 +725,6 @@ vuint32m8_t test_vmv_v_x_u32m8(uint32_t src, size_t vl) {
   return vmv_v_x_u32m8(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmv.v.v.nxv1i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -815,7 +734,6 @@ vuint64m1_t test_vmv_v_v_u64m1(vuint64m1_t src, size_t vl) {
   return vmv_v_v_u64m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_x_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmv.v.x.nxv1i64.i64(i64 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -825,7 +743,6 @@ vuint64m1_t test_vmv_v_x_u64m1(uint64_t src, size_t vl) {
   return vmv_v_x_u64m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmv.v.v.nxv2i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -835,7 +752,6 @@ vuint64m2_t test_vmv_v_v_u64m2(vuint64m2_t src, size_t vl) {
   return vmv_v_v_u64m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_x_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmv.v.x.nxv2i64.i64(i64 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -845,7 +761,6 @@ vuint64m2_t test_vmv_v_x_u64m2(uint64_t src, size_t vl) {
   return vmv_v_x_u64m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmv.v.v.nxv4i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -855,7 +770,6 @@ vuint64m4_t test_vmv_v_v_u64m4(vuint64m4_t src, size_t vl) {
   return vmv_v_v_u64m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_x_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmv.v.x.nxv4i64.i64(i64 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -865,7 +779,6 @@ vuint64m4_t test_vmv_v_x_u64m4(uint64_t src, size_t vl) {
   return vmv_v_x_u64m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmv.v.v.nxv8i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -875,7 +788,6 @@ vuint64m8_t test_vmv_v_v_u64m8(vuint64m8_t src, size_t vl) {
   return vmv_v_v_u64m8(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_x_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmv.v.x.nxv8i64.i64(i64 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -885,7 +797,6 @@ vuint64m8_t test_vmv_v_x_u64m8(uint64_t src, size_t vl) {
   return vmv_v_x_u64m8(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vmv.v.v.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -895,7 +806,6 @@ vfloat32mf2_t test_vmv_v_v_f32mf2(vfloat32mf2_t src, size_t vl) {
   return vmv_v_v_f32mf2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vmv.v.v.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -905,7 +815,6 @@ vfloat32m1_t test_vmv_v_v_f32m1(vfloat32m1_t src, size_t vl) {
   return vmv_v_v_f32m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vmv.v.v.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -915,7 +824,6 @@ vfloat32m2_t test_vmv_v_v_f32m2(vfloat32m2_t src, size_t vl) {
   return vmv_v_v_f32m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vmv.v.v.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -925,7 +833,6 @@ vfloat32m4_t test_vmv_v_v_f32m4(vfloat32m4_t src, size_t vl) {
   return vmv_v_v_f32m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vmv.v.v.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -935,7 +842,6 @@ vfloat32m8_t test_vmv_v_v_f32m8(vfloat32m8_t src, size_t vl) {
   return vmv_v_v_f32m8(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vmv.v.v.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -945,7 +851,6 @@ vfloat64m1_t test_vmv_v_v_f64m1(vfloat64m1_t src, size_t vl) {
   return vmv_v_v_f64m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vmv.v.v.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -955,7 +860,6 @@ vfloat64m2_t test_vmv_v_v_f64m2(vfloat64m2_t src, size_t vl) {
   return vmv_v_v_f64m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vmv.v.v.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -965,7 +869,6 @@ vfloat64m4_t test_vmv_v_v_f64m4(vfloat64m4_t src, size_t vl) {
   return vmv_v_v_f64m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vmv.v.v.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -975,7 +878,6 @@ vfloat64m8_t test_vmv_v_v_f64m8(vfloat64m8_t src, size_t vl) {
   return vmv_v_v_f64m8(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_i8mf8_i8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv1i8(<vscale x 1 x i8> [[SRC:%.*]])
@@ -983,7 +885,6 @@ vfloat64m8_t test_vmv_v_v_f64m8(vfloat64m8_t src, size_t vl) {
 //
 int8_t test_vmv_x_s_i8mf8_i8(vint8mf8_t src) { return vmv_x_s_i8mf8_i8(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmv.s.x.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -993,7 +894,6 @@ vint8mf8_t test_vmv_s_x_i8mf8(vint8mf8_t dst, int8_t src, size_t vl) {
   return vmv_s_x_i8mf8(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_i8mf4_i8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv2i8(<vscale x 2 x i8> [[SRC:%.*]])
@@ -1001,7 +901,6 @@ vint8mf8_t test_vmv_s_x_i8mf8(vint8mf8_t dst, int8_t src, size_t vl) {
 //
 int8_t test_vmv_x_s_i8mf4_i8(vint8mf4_t src) { return vmv_x_s_i8mf4_i8(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmv.s.x.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -1011,7 +910,6 @@ vint8mf4_t test_vmv_s_x_i8mf4(vint8mf4_t dst, int8_t src, size_t vl) {
   return vmv_s_x_i8mf4(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_i8mf2_i8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv4i8(<vscale x 4 x i8> [[SRC:%.*]])
@@ -1019,7 +917,6 @@ vint8mf4_t test_vmv_s_x_i8mf4(vint8mf4_t dst, int8_t src, size_t vl) {
 //
 int8_t test_vmv_x_s_i8mf2_i8(vint8mf2_t src) { return vmv_x_s_i8mf2_i8(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmv.s.x.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -1029,7 +926,6 @@ vint8mf2_t test_vmv_s_x_i8mf2(vint8mf2_t dst, int8_t src, size_t vl) {
   return vmv_s_x_i8mf2(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_i8m1_i8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv8i8(<vscale x 8 x i8> [[SRC:%.*]])
@@ -1037,7 +933,6 @@ vint8mf2_t test_vmv_s_x_i8mf2(vint8mf2_t dst, int8_t src, size_t vl) {
 //
 int8_t test_vmv_x_s_i8m1_i8(vint8m1_t src) { return vmv_x_s_i8m1_i8(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmv.s.x.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -1047,7 +942,6 @@ vint8m1_t test_vmv_s_x_i8m1(vint8m1_t dst, int8_t src, size_t vl) {
   return vmv_s_x_i8m1(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_i8m2_i8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv16i8(<vscale x 16 x i8> [[SRC:%.*]])
@@ -1055,7 +949,6 @@ vint8m1_t test_vmv_s_x_i8m1(vint8m1_t dst, int8_t src, size_t vl) {
 //
 int8_t test_vmv_x_s_i8m2_i8(vint8m2_t src) { return vmv_x_s_i8m2_i8(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmv.s.x.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -1065,7 +958,6 @@ vint8m2_t test_vmv_s_x_i8m2(vint8m2_t dst, int8_t src, size_t vl) {
   return vmv_s_x_i8m2(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_i8m4_i8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv32i8(<vscale x 32 x i8> [[SRC:%.*]])
@@ -1073,7 +965,6 @@ vint8m2_t test_vmv_s_x_i8m2(vint8m2_t dst, int8_t src, size_t vl) {
 //
 int8_t test_vmv_x_s_i8m4_i8(vint8m4_t src) { return vmv_x_s_i8m4_i8(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmv.s.x.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -1083,7 +974,6 @@ vint8m4_t test_vmv_s_x_i8m4(vint8m4_t dst, int8_t src, size_t vl) {
   return vmv_s_x_i8m4(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_i8m8_i8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]])
@@ -1091,7 +981,6 @@ vint8m4_t test_vmv_s_x_i8m4(vint8m4_t dst, int8_t src, size_t vl) {
 //
 int8_t test_vmv_x_s_i8m8_i8(vint8m8_t src) { return vmv_x_s_i8m8_i8(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmv.s.x.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -1101,7 +990,6 @@ vint8m8_t test_vmv_s_x_i8m8(vint8m8_t dst, int8_t src, size_t vl) {
   return vmv_s_x_i8m8(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_i16mf4_i16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv1i16(<vscale x 1 x i16> [[SRC:%.*]])
@@ -1111,7 +999,6 @@ int16_t test_vmv_x_s_i16mf4_i16(vint16mf4_t src) {
   return vmv_x_s_i16mf4_i16(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmv.s.x.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -1121,7 +1008,6 @@ vint16mf4_t test_vmv_s_x_i16mf4(vint16mf4_t dst, int16_t src, size_t vl) {
   return vmv_s_x_i16mf4(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_i16mf2_i16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv2i16(<vscale x 2 x i16> [[SRC:%.*]])
@@ -1131,7 +1017,6 @@ int16_t test_vmv_x_s_i16mf2_i16(vint16mf2_t src) {
   return vmv_x_s_i16mf2_i16(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmv.s.x.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -1141,7 +1026,6 @@ vint16mf2_t test_vmv_s_x_i16mf2(vint16mf2_t dst, int16_t src, size_t vl) {
   return vmv_s_x_i16mf2(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_i16m1_i16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv4i16(<vscale x 4 x i16> [[SRC:%.*]])
@@ -1151,7 +1035,6 @@ int16_t test_vmv_x_s_i16m1_i16(vint16m1_t src) {
   return vmv_x_s_i16m1_i16(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmv.s.x.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -1161,7 +1044,6 @@ vint16m1_t test_vmv_s_x_i16m1(vint16m1_t dst, int16_t src, size_t vl) {
   return vmv_s_x_i16m1(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_i16m2_i16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv8i16(<vscale x 8 x i16> [[SRC:%.*]])
@@ -1171,7 +1053,6 @@ int16_t test_vmv_x_s_i16m2_i16(vint16m2_t src) {
   return vmv_x_s_i16m2_i16(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmv.s.x.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -1181,7 +1062,6 @@ vint16m2_t test_vmv_s_x_i16m2(vint16m2_t dst, int16_t src, size_t vl) {
   return vmv_s_x_i16m2(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_i16m4_i16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv16i16(<vscale x 16 x i16> [[SRC:%.*]])
@@ -1191,7 +1071,6 @@ int16_t test_vmv_x_s_i16m4_i16(vint16m4_t src) {
   return vmv_x_s_i16m4_i16(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmv.s.x.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -1201,7 +1080,6 @@ vint16m4_t test_vmv_s_x_i16m4(vint16m4_t dst, int16_t src, size_t vl) {
   return vmv_s_x_i16m4(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_i16m8_i16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]])
@@ -1211,7 +1089,6 @@ int16_t test_vmv_x_s_i16m8_i16(vint16m8_t src) {
   return vmv_x_s_i16m8_i16(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmv.s.x.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -1221,7 +1098,6 @@ vint16m8_t test_vmv_s_x_i16m8(vint16m8_t dst, int16_t src, size_t vl) {
   return vmv_s_x_i16m8(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_i32mf2_i32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv1i32(<vscale x 1 x i32> [[SRC:%.*]])
@@ -1231,7 +1107,6 @@ int32_t test_vmv_x_s_i32mf2_i32(vint32mf2_t src) {
   return vmv_x_s_i32mf2_i32(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmv.s.x.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -1241,7 +1116,6 @@ vint32mf2_t test_vmv_s_x_i32mf2(vint32mf2_t dst, int32_t src, size_t vl) {
   return vmv_s_x_i32mf2(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_i32m1_i32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv2i32(<vscale x 2 x i32> [[SRC:%.*]])
@@ -1251,7 +1125,6 @@ int32_t test_vmv_x_s_i32m1_i32(vint32m1_t src) {
   return vmv_x_s_i32m1_i32(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmv.s.x.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -1261,7 +1134,6 @@ vint32m1_t test_vmv_s_x_i32m1(vint32m1_t dst, int32_t src, size_t vl) {
   return vmv_s_x_i32m1(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_i32m2_i32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv4i32(<vscale x 4 x i32> [[SRC:%.*]])
@@ -1271,7 +1143,6 @@ int32_t test_vmv_x_s_i32m2_i32(vint32m2_t src) {
   return vmv_x_s_i32m2_i32(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmv.s.x.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -1281,7 +1152,6 @@ vint32m2_t test_vmv_s_x_i32m2(vint32m2_t dst, int32_t src, size_t vl) {
   return vmv_s_x_i32m2(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_i32m4_i32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv8i32(<vscale x 8 x i32> [[SRC:%.*]])
@@ -1291,7 +1161,6 @@ int32_t test_vmv_x_s_i32m4_i32(vint32m4_t src) {
   return vmv_x_s_i32m4_i32(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmv.s.x.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -1301,7 +1170,6 @@ vint32m4_t test_vmv_s_x_i32m4(vint32m4_t dst, int32_t src, size_t vl) {
   return vmv_s_x_i32m4(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_i32m8_i32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]])
@@ -1311,7 +1179,6 @@ int32_t test_vmv_x_s_i32m8_i32(vint32m8_t src) {
   return vmv_x_s_i32m8_i32(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmv.s.x.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -1321,7 +1188,6 @@ vint32m8_t test_vmv_s_x_i32m8(vint32m8_t dst, int32_t src, size_t vl) {
   return vmv_s_x_i32m8(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_i64m1_i64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv1i64(<vscale x 1 x i64> [[SRC:%.*]])
@@ -1331,7 +1197,6 @@ int64_t test_vmv_x_s_i64m1_i64(vint64m1_t src) {
   return vmv_x_s_i64m1_i64(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmv.s.x.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -1341,7 +1206,6 @@ vint64m1_t test_vmv_s_x_i64m1(vint64m1_t dst, int64_t src, size_t vl) {
   return vmv_s_x_i64m1(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_i64m2_i64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv2i64(<vscale x 2 x i64> [[SRC:%.*]])
@@ -1351,7 +1215,6 @@ int64_t test_vmv_x_s_i64m2_i64(vint64m2_t src) {
   return vmv_x_s_i64m2_i64(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmv.s.x.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -1361,7 +1224,6 @@ vint64m2_t test_vmv_s_x_i64m2(vint64m2_t dst, int64_t src, size_t vl) {
   return vmv_s_x_i64m2(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_i64m4_i64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv4i64(<vscale x 4 x i64> [[SRC:%.*]])
@@ -1371,7 +1233,6 @@ int64_t test_vmv_x_s_i64m4_i64(vint64m4_t src) {
   return vmv_x_s_i64m4_i64(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmv.s.x.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -1381,7 +1242,6 @@ vint64m4_t test_vmv_s_x_i64m4(vint64m4_t dst, int64_t src, size_t vl) {
   return vmv_s_x_i64m4(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_i64m8_i64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]])
@@ -1391,7 +1251,6 @@ int64_t test_vmv_x_s_i64m8_i64(vint64m8_t src) {
   return vmv_x_s_i64m8_i64(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmv.s.x.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -1401,7 +1260,6 @@ vint64m8_t test_vmv_s_x_i64m8(vint64m8_t dst, int64_t src, size_t vl) {
   return vmv_s_x_i64m8(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_u8mf8_u8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv1i8(<vscale x 1 x i8> [[SRC:%.*]])
@@ -1409,7 +1267,6 @@ vint64m8_t test_vmv_s_x_i64m8(vint64m8_t dst, int64_t src, size_t vl) {
 //
 uint8_t test_vmv_x_s_u8mf8_u8(vuint8mf8_t src) { return vmv_x_s_u8mf8_u8(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmv.s.x.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -1419,7 +1276,6 @@ vuint8mf8_t test_vmv_s_x_u8mf8(vuint8mf8_t dst, uint8_t src, size_t vl) {
   return vmv_s_x_u8mf8(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_u8mf4_u8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv2i8(<vscale x 2 x i8> [[SRC:%.*]])
@@ -1427,7 +1283,6 @@ vuint8mf8_t test_vmv_s_x_u8mf8(vuint8mf8_t dst, uint8_t src, size_t vl) {
 //
 uint8_t test_vmv_x_s_u8mf4_u8(vuint8mf4_t src) { return vmv_x_s_u8mf4_u8(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmv.s.x.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -1437,7 +1292,6 @@ vuint8mf4_t test_vmv_s_x_u8mf4(vuint8mf4_t dst, uint8_t src, size_t vl) {
   return vmv_s_x_u8mf4(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_u8mf2_u8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv4i8(<vscale x 4 x i8> [[SRC:%.*]])
@@ -1445,7 +1299,6 @@ vuint8mf4_t test_vmv_s_x_u8mf4(vuint8mf4_t dst, uint8_t src, size_t vl) {
 //
 uint8_t test_vmv_x_s_u8mf2_u8(vuint8mf2_t src) { return vmv_x_s_u8mf2_u8(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmv.s.x.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -1455,7 +1308,6 @@ vuint8mf2_t test_vmv_s_x_u8mf2(vuint8mf2_t dst, uint8_t src, size_t vl) {
   return vmv_s_x_u8mf2(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_u8m1_u8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv8i8(<vscale x 8 x i8> [[SRC:%.*]])
@@ -1463,7 +1315,6 @@ vuint8mf2_t test_vmv_s_x_u8mf2(vuint8mf2_t dst, uint8_t src, size_t vl) {
 //
 uint8_t test_vmv_x_s_u8m1_u8(vuint8m1_t src) { return vmv_x_s_u8m1_u8(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmv.s.x.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -1473,7 +1324,6 @@ vuint8m1_t test_vmv_s_x_u8m1(vuint8m1_t dst, uint8_t src, size_t vl) {
   return vmv_s_x_u8m1(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_u8m2_u8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv16i8(<vscale x 16 x i8> [[SRC:%.*]])
@@ -1481,7 +1331,6 @@ vuint8m1_t test_vmv_s_x_u8m1(vuint8m1_t dst, uint8_t src, size_t vl) {
 //
 uint8_t test_vmv_x_s_u8m2_u8(vuint8m2_t src) { return vmv_x_s_u8m2_u8(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmv.s.x.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -1491,7 +1340,6 @@ vuint8m2_t test_vmv_s_x_u8m2(vuint8m2_t dst, uint8_t src, size_t vl) {
   return vmv_s_x_u8m2(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_u8m4_u8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv32i8(<vscale x 32 x i8> [[SRC:%.*]])
@@ -1499,7 +1347,6 @@ vuint8m2_t test_vmv_s_x_u8m2(vuint8m2_t dst, uint8_t src, size_t vl) {
 //
 uint8_t test_vmv_x_s_u8m4_u8(vuint8m4_t src) { return vmv_x_s_u8m4_u8(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmv.s.x.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -1509,7 +1356,6 @@ vuint8m4_t test_vmv_s_x_u8m4(vuint8m4_t dst, uint8_t src, size_t vl) {
   return vmv_s_x_u8m4(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_u8m8_u8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]])
@@ -1517,7 +1363,6 @@ vuint8m4_t test_vmv_s_x_u8m4(vuint8m4_t dst, uint8_t src, size_t vl) {
 //
 uint8_t test_vmv_x_s_u8m8_u8(vuint8m8_t src) { return vmv_x_s_u8m8_u8(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmv.s.x.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -1527,7 +1372,6 @@ vuint8m8_t test_vmv_s_x_u8m8(vuint8m8_t dst, uint8_t src, size_t vl) {
   return vmv_s_x_u8m8(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_u16mf4_u16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv1i16(<vscale x 1 x i16> [[SRC:%.*]])
@@ -1537,7 +1381,6 @@ uint16_t test_vmv_x_s_u16mf4_u16(vuint16mf4_t src) {
   return vmv_x_s_u16mf4_u16(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmv.s.x.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -1547,7 +1390,6 @@ vuint16mf4_t test_vmv_s_x_u16mf4(vuint16mf4_t dst, uint16_t src, size_t vl) {
   return vmv_s_x_u16mf4(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_u16mf2_u16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv2i16(<vscale x 2 x i16> [[SRC:%.*]])
@@ -1557,7 +1399,6 @@ uint16_t test_vmv_x_s_u16mf2_u16(vuint16mf2_t src) {
   return vmv_x_s_u16mf2_u16(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmv.s.x.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -1567,7 +1408,6 @@ vuint16mf2_t test_vmv_s_x_u16mf2(vuint16mf2_t dst, uint16_t src, size_t vl) {
   return vmv_s_x_u16mf2(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_u16m1_u16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv4i16(<vscale x 4 x i16> [[SRC:%.*]])
@@ -1577,7 +1417,6 @@ uint16_t test_vmv_x_s_u16m1_u16(vuint16m1_t src) {
   return vmv_x_s_u16m1_u16(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmv.s.x.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -1587,7 +1426,6 @@ vuint16m1_t test_vmv_s_x_u16m1(vuint16m1_t dst, uint16_t src, size_t vl) {
   return vmv_s_x_u16m1(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_u16m2_u16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv8i16(<vscale x 8 x i16> [[SRC:%.*]])
@@ -1597,7 +1435,6 @@ uint16_t test_vmv_x_s_u16m2_u16(vuint16m2_t src) {
   return vmv_x_s_u16m2_u16(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmv.s.x.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -1607,7 +1444,6 @@ vuint16m2_t test_vmv_s_x_u16m2(vuint16m2_t dst, uint16_t src, size_t vl) {
   return vmv_s_x_u16m2(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_u16m4_u16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv16i16(<vscale x 16 x i16> [[SRC:%.*]])
@@ -1617,7 +1453,6 @@ uint16_t test_vmv_x_s_u16m4_u16(vuint16m4_t src) {
   return vmv_x_s_u16m4_u16(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmv.s.x.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -1627,7 +1462,6 @@ vuint16m4_t test_vmv_s_x_u16m4(vuint16m4_t dst, uint16_t src, size_t vl) {
   return vmv_s_x_u16m4(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_u16m8_u16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]])
@@ -1637,7 +1471,6 @@ uint16_t test_vmv_x_s_u16m8_u16(vuint16m8_t src) {
   return vmv_x_s_u16m8_u16(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmv.s.x.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -1647,7 +1480,6 @@ vuint16m8_t test_vmv_s_x_u16m8(vuint16m8_t dst, uint16_t src, size_t vl) {
   return vmv_s_x_u16m8(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_u32mf2_u32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv1i32(<vscale x 1 x i32> [[SRC:%.*]])
@@ -1657,7 +1489,6 @@ uint32_t test_vmv_x_s_u32mf2_u32(vuint32mf2_t src) {
   return vmv_x_s_u32mf2_u32(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmv.s.x.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -1667,7 +1498,6 @@ vuint32mf2_t test_vmv_s_x_u32mf2(vuint32mf2_t dst, uint32_t src, size_t vl) {
   return vmv_s_x_u32mf2(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_u32m1_u32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv2i32(<vscale x 2 x i32> [[SRC:%.*]])
@@ -1677,7 +1507,6 @@ uint32_t test_vmv_x_s_u32m1_u32(vuint32m1_t src) {
   return vmv_x_s_u32m1_u32(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmv.s.x.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -1687,7 +1516,6 @@ vuint32m1_t test_vmv_s_x_u32m1(vuint32m1_t dst, uint32_t src, size_t vl) {
   return vmv_s_x_u32m1(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_u32m2_u32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv4i32(<vscale x 4 x i32> [[SRC:%.*]])
@@ -1697,7 +1525,6 @@ uint32_t test_vmv_x_s_u32m2_u32(vuint32m2_t src) {
   return vmv_x_s_u32m2_u32(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmv.s.x.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -1707,7 +1534,6 @@ vuint32m2_t test_vmv_s_x_u32m2(vuint32m2_t dst, uint32_t src, size_t vl) {
   return vmv_s_x_u32m2(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_u32m4_u32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv8i32(<vscale x 8 x i32> [[SRC:%.*]])
@@ -1717,7 +1543,6 @@ uint32_t test_vmv_x_s_u32m4_u32(vuint32m4_t src) {
   return vmv_x_s_u32m4_u32(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmv.s.x.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -1727,7 +1552,6 @@ vuint32m4_t test_vmv_s_x_u32m4(vuint32m4_t dst, uint32_t src, size_t vl) {
   return vmv_s_x_u32m4(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_u32m8_u32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]])
@@ -1737,7 +1561,6 @@ uint32_t test_vmv_x_s_u32m8_u32(vuint32m8_t src) {
   return vmv_x_s_u32m8_u32(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmv.s.x.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -1747,7 +1570,6 @@ vuint32m8_t test_vmv_s_x_u32m8(vuint32m8_t dst, uint32_t src, size_t vl) {
   return vmv_s_x_u32m8(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_u64m1_u64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv1i64(<vscale x 1 x i64> [[SRC:%.*]])
@@ -1757,7 +1579,6 @@ uint64_t test_vmv_x_s_u64m1_u64(vuint64m1_t src) {
   return vmv_x_s_u64m1_u64(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmv.s.x.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -1767,7 +1588,6 @@ vuint64m1_t test_vmv_s_x_u64m1(vuint64m1_t dst, uint64_t src, size_t vl) {
   return vmv_s_x_u64m1(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_u64m2_u64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv2i64(<vscale x 2 x i64> [[SRC:%.*]])
@@ -1777,7 +1597,6 @@ uint64_t test_vmv_x_s_u64m2_u64(vuint64m2_t src) {
   return vmv_x_s_u64m2_u64(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmv.s.x.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -1787,7 +1606,6 @@ vuint64m2_t test_vmv_s_x_u64m2(vuint64m2_t dst, uint64_t src, size_t vl) {
   return vmv_s_x_u64m2(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_u64m4_u64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv4i64(<vscale x 4 x i64> [[SRC:%.*]])
@@ -1797,7 +1615,6 @@ uint64_t test_vmv_x_s_u64m4_u64(vuint64m4_t src) {
   return vmv_x_s_u64m4_u64(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmv.s.x.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -1807,7 +1624,6 @@ vuint64m4_t test_vmv_s_x_u64m4(vuint64m4_t dst, uint64_t src, size_t vl) {
   return vmv_s_x_u64m4(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_u64m8_u64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]])
@@ -1817,7 +1633,6 @@ uint64_t test_vmv_x_s_u64m8_u64(vuint64m8_t src) {
   return vmv_x_s_u64m8_u64(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmv.s.x.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmxnor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmxnor.c
index 5949db61717cd..e64dd8e9224d8 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmxnor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmxnor.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmxnor_mm_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmxnor.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vbool1_t test_vmxnor_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
   return vmxnor_mm_b1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmxnor_mm_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmxnor.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vbool2_t test_vmxnor_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
   return vmxnor_mm_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmxnor_mm_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmxnor.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vbool4_t test_vmxnor_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
   return vmxnor_mm_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmxnor_mm_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmxnor.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vbool8_t test_vmxnor_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
   return vmxnor_mm_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmxnor_mm_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmxnor.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vbool16_t test_vmxnor_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
   return vmxnor_mm_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmxnor_mm_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmxnor.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vbool32_t test_vmxnor_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
   return vmxnor_mm_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmxnor_mm_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmxnor.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmxor.c
index 2a4d064fa56db..1f32736c1c637 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmxor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmxor.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmxor_mm_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmxor.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vbool1_t test_vmxor_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
   return vmxor_mm_b1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmxor_mm_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmxor.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vbool2_t test_vmxor_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
   return vmxor_mm_b2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmxor_mm_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmxor.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vbool4_t test_vmxor_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
   return vmxor_mm_b4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmxor_mm_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmxor.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vbool8_t test_vmxor_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
   return vmxor_mm_b8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmxor_mm_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmxor.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vbool16_t test_vmxor_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
   return vmxor_mm_b16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmxor_mm_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmxor.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vbool32_t test_vmxor_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
   return vmxor_mm_b32(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmxor_mm_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmxor.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnclip.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnclip.c
index e4a0d905f75bf..11aa3256a0c4d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnclip.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnclip.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint8mf8_t test_vnclip_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
   return vnclip_wv_i8mf8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint8mf8_t test_vnclip_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) {
   return vnclip_wx_i8mf8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint8mf4_t test_vnclip_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
   return vnclip_wv_i8mf4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint8mf4_t test_vnclip_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) {
   return vnclip_wx_i8mf4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint8mf2_t test_vnclip_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
   return vnclip_wv_i8mf2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint8mf2_t test_vnclip_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) {
   return vnclip_wx_i8mf2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wv_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vint8m1_t test_vnclip_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) {
   return vnclip_wv_i8m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vint8m1_t test_vnclip_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) {
   return vnclip_wx_i8m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wv_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vint8m2_t test_vnclip_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) {
   return vnclip_wv_i8m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vint8m2_t test_vnclip_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) {
   return vnclip_wx_i8m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wv_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vint8m4_t test_vnclip_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) {
   return vnclip_wv_i8m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vint8m4_t test_vnclip_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) {
   return vnclip_wx_i8m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -135,7 +122,6 @@ vint16mf4_t test_vnclip_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift,
   return vnclip_wv_i16mf4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -145,7 +131,6 @@ vint16mf4_t test_vnclip_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) {
   return vnclip_wx_i16mf4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -156,7 +141,6 @@ vint16mf2_t test_vnclip_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift,
   return vnclip_wv_i16mf2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -166,7 +150,6 @@ vint16mf2_t test_vnclip_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) {
   return vnclip_wx_i16mf2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -176,7 +159,6 @@ vint16m1_t test_vnclip_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) {
   return vnclip_wv_i16m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -186,7 +168,6 @@ vint16m1_t test_vnclip_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) {
   return vnclip_wx_i16m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -196,7 +177,6 @@ vint16m2_t test_vnclip_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) {
   return vnclip_wv_i16m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -206,7 +186,6 @@ vint16m2_t test_vnclip_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) {
   return vnclip_wx_i16m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -216,7 +195,6 @@ vint16m4_t test_vnclip_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) {
   return vnclip_wv_i16m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -226,7 +204,6 @@ vint16m4_t test_vnclip_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) {
   return vnclip_wx_i16m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -237,7 +214,6 @@ vint32mf2_t test_vnclip_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift,
   return vnclip_wv_i32mf2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -247,7 +223,6 @@ vint32mf2_t test_vnclip_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) {
   return vnclip_wx_i32mf2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -257,7 +232,6 @@ vint32m1_t test_vnclip_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) {
   return vnclip_wv_i32m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -267,7 +241,6 @@ vint32m1_t test_vnclip_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) {
   return vnclip_wx_i32m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -277,7 +250,6 @@ vint32m2_t test_vnclip_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) {
   return vnclip_wv_i32m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -287,7 +259,6 @@ vint32m2_t test_vnclip_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) {
   return vnclip_wx_i32m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -297,7 +268,6 @@ vint32m4_t test_vnclip_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) {
   return vnclip_wv_i32m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -307,7 +277,6 @@ vint32m4_t test_vnclip_wx_i32m4(vint64m8_t op1, size_t shift, size_t vl) {
   return vnclip_wx_i32m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -318,7 +287,6 @@ vuint8mf8_t test_vnclipu_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift,
   return vnclipu_wv_u8mf8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -328,7 +296,6 @@ vuint8mf8_t test_vnclipu_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) {
   return vnclipu_wx_u8mf8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -339,7 +306,6 @@ vuint8mf4_t test_vnclipu_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift,
   return vnclipu_wv_u8mf4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -349,7 +315,6 @@ vuint8mf4_t test_vnclipu_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) {
   return vnclipu_wx_u8mf4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -360,7 +325,6 @@ vuint8mf2_t test_vnclipu_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift,
   return vnclipu_wv_u8mf2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -370,7 +334,6 @@ vuint8mf2_t test_vnclipu_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) {
   return vnclipu_wx_u8mf2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -380,7 +343,6 @@ vuint8m1_t test_vnclipu_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
   return vnclipu_wv_u8m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -390,7 +352,6 @@ vuint8m1_t test_vnclipu_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) {
   return vnclipu_wx_u8m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -400,7 +361,6 @@ vuint8m2_t test_vnclipu_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
   return vnclipu_wv_u8m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -410,7 +370,6 @@ vuint8m2_t test_vnclipu_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) {
   return vnclipu_wx_u8m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -420,7 +379,6 @@ vuint8m4_t test_vnclipu_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
   return vnclipu_wv_u8m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -430,7 +388,6 @@ vuint8m4_t test_vnclipu_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) {
   return vnclipu_wx_u8m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -441,7 +398,6 @@ vuint16mf4_t test_vnclipu_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift,
   return vnclipu_wv_u16mf4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -451,7 +407,6 @@ vuint16mf4_t test_vnclipu_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) {
   return vnclipu_wx_u16mf4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -462,7 +417,6 @@ vuint16mf2_t test_vnclipu_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift,
   return vnclipu_wv_u16mf2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -472,7 +426,6 @@ vuint16mf2_t test_vnclipu_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) {
   return vnclipu_wx_u16mf2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -483,7 +436,6 @@ vuint16m1_t test_vnclipu_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift,
   return vnclipu_wv_u16m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -493,7 +445,6 @@ vuint16m1_t test_vnclipu_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) {
   return vnclipu_wx_u16m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -504,7 +455,6 @@ vuint16m2_t test_vnclipu_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift,
   return vnclipu_wv_u16m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -514,7 +464,6 @@ vuint16m2_t test_vnclipu_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) {
   return vnclipu_wx_u16m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -525,7 +474,6 @@ vuint16m4_t test_vnclipu_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift,
   return vnclipu_wv_u16m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -535,7 +483,6 @@ vuint16m4_t test_vnclipu_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) {
   return vnclipu_wx_u16m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -546,7 +493,6 @@ vuint32mf2_t test_vnclipu_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift,
   return vnclipu_wv_u32mf2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -556,7 +502,6 @@ vuint32mf2_t test_vnclipu_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) {
   return vnclipu_wx_u32mf2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -567,7 +512,6 @@ vuint32m1_t test_vnclipu_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift,
   return vnclipu_wv_u32m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -577,7 +521,6 @@ vuint32m1_t test_vnclipu_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) {
   return vnclipu_wx_u32m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -588,7 +531,6 @@ vuint32m2_t test_vnclipu_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift,
   return vnclipu_wv_u32m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -598,7 +540,6 @@ vuint32m2_t test_vnclipu_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) {
   return vnclipu_wx_u32m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -609,7 +550,6 @@ vuint32m4_t test_vnclipu_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift,
   return vnclipu_wv_u32m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -619,7 +559,6 @@ vuint32m4_t test_vnclipu_wx_u32m4(vuint64m8_t op1, size_t shift, size_t vl) {
   return vnclipu_wx_u32m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -631,7 +570,6 @@ vint8mf8_t test_vnclip_wv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
   return vnclip_wv_i8mf8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -642,7 +580,6 @@ vint8mf8_t test_vnclip_wx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
   return vnclip_wx_i8mf8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -654,7 +591,6 @@ vint8mf4_t test_vnclip_wv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
   return vnclip_wv_i8mf4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -665,7 +601,6 @@ vint8mf4_t test_vnclip_wx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
   return vnclip_wx_i8mf4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -677,7 +612,6 @@ vint8mf2_t test_vnclip_wv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
   return vnclip_wv_i8mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -688,7 +622,6 @@ vint8mf2_t test_vnclip_wx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
   return vnclip_wx_i8mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wv_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -699,7 +632,6 @@ vint8m1_t test_vnclip_wv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
   return vnclip_wv_i8m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -710,7 +642,6 @@ vint8m1_t test_vnclip_wx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
   return vnclip_wx_i8m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wv_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -721,7 +652,6 @@ vint8m2_t test_vnclip_wv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
   return vnclip_wv_i8m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -732,7 +662,6 @@ vint8m2_t test_vnclip_wx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
   return vnclip_wx_i8m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wv_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -743,7 +672,6 @@ vint8m4_t test_vnclip_wv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
   return vnclip_wv_i8m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -754,7 +682,6 @@ vint8m4_t test_vnclip_wx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
   return vnclip_wx_i8m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -766,7 +693,6 @@ vint16mf4_t test_vnclip_wv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
   return vnclip_wv_i16mf4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -777,7 +703,6 @@ vint16mf4_t test_vnclip_wx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
   return vnclip_wx_i16mf4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -789,7 +714,6 @@ vint16mf2_t test_vnclip_wv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
   return vnclip_wv_i16mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -800,7 +724,6 @@ vint16mf2_t test_vnclip_wx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
   return vnclip_wx_i16mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -812,7 +735,6 @@ vint16m1_t test_vnclip_wv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
   return vnclip_wv_i16m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -823,7 +745,6 @@ vint16m1_t test_vnclip_wx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
   return vnclip_wx_i16m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -835,7 +756,6 @@ vint16m2_t test_vnclip_wv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
   return vnclip_wv_i16m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -846,7 +766,6 @@ vint16m2_t test_vnclip_wx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
   return vnclip_wx_i16m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -858,7 +777,6 @@ vint16m4_t test_vnclip_wv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
   return vnclip_wv_i16m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -869,7 +787,6 @@ vint16m4_t test_vnclip_wx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
   return vnclip_wx_i16m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -881,7 +798,6 @@ vint32mf2_t test_vnclip_wv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
   return vnclip_wv_i32mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -892,7 +808,6 @@ vint32mf2_t test_vnclip_wx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
   return vnclip_wx_i32mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -904,7 +819,6 @@ vint32m1_t test_vnclip_wv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
   return vnclip_wv_i32m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -915,7 +829,6 @@ vint32m1_t test_vnclip_wx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
   return vnclip_wx_i32m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -927,7 +840,6 @@ vint32m2_t test_vnclip_wv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
   return vnclip_wv_i32m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -938,7 +850,6 @@ vint32m2_t test_vnclip_wx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
   return vnclip_wx_i32m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -950,7 +861,6 @@ vint32m4_t test_vnclip_wv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
   return vnclip_wv_i32m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -961,7 +871,6 @@ vint32m4_t test_vnclip_wx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
   return vnclip_wx_i32m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -973,7 +882,6 @@ vuint8mf8_t test_vnclipu_wv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
   return vnclipu_wv_u8mf8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -984,7 +892,6 @@ vuint8mf8_t test_vnclipu_wx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
   return vnclipu_wx_u8mf8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -996,7 +903,6 @@ vuint8mf4_t test_vnclipu_wv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
   return vnclipu_wv_u8mf4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1007,7 +913,6 @@ vuint8mf4_t test_vnclipu_wx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
   return vnclipu_wx_u8mf4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1019,7 +924,6 @@ vuint8mf2_t test_vnclipu_wv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
   return vnclipu_wv_u8mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1030,7 +934,6 @@ vuint8mf2_t test_vnclipu_wx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
   return vnclipu_wx_u8mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1042,7 +945,6 @@ vuint8m1_t test_vnclipu_wv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
   return vnclipu_wv_u8m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1053,7 +955,6 @@ vuint8m1_t test_vnclipu_wx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
   return vnclipu_wx_u8m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1065,7 +966,6 @@ vuint8m2_t test_vnclipu_wv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
   return vnclipu_wv_u8m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1076,7 +976,6 @@ vuint8m2_t test_vnclipu_wx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
   return vnclipu_wx_u8m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1088,7 +987,6 @@ vuint8m4_t test_vnclipu_wv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
   return vnclipu_wv_u8m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1099,7 +997,6 @@ vuint8m4_t test_vnclipu_wx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
   return vnclipu_wx_u8m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1111,7 +1008,6 @@ vuint16mf4_t test_vnclipu_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
   return vnclipu_wv_u16mf4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1123,7 +1019,6 @@ vuint16mf4_t test_vnclipu_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
   return vnclipu_wx_u16mf4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1135,7 +1030,6 @@ vuint16mf2_t test_vnclipu_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
   return vnclipu_wv_u16mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1147,7 +1041,6 @@ vuint16mf2_t test_vnclipu_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
   return vnclipu_wx_u16mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1159,7 +1052,6 @@ vuint16m1_t test_vnclipu_wv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
   return vnclipu_wv_u16m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1170,7 +1062,6 @@ vuint16m1_t test_vnclipu_wx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
   return vnclipu_wx_u16m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1182,7 +1073,6 @@ vuint16m2_t test_vnclipu_wv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
   return vnclipu_wv_u16m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1193,7 +1083,6 @@ vuint16m2_t test_vnclipu_wx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
   return vnclipu_wx_u16m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1205,7 +1094,6 @@ vuint16m4_t test_vnclipu_wv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
   return vnclipu_wv_u16m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1216,7 +1104,6 @@ vuint16m4_t test_vnclipu_wx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
   return vnclipu_wx_u16m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1228,7 +1115,6 @@ vuint32mf2_t test_vnclipu_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
   return vnclipu_wv_u32mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1240,7 +1126,6 @@ vuint32mf2_t test_vnclipu_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
   return vnclipu_wx_u32mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1252,7 +1137,6 @@ vuint32m1_t test_vnclipu_wv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
   return vnclipu_wv_u32m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1263,7 +1147,6 @@ vuint32m1_t test_vnclipu_wx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
   return vnclipu_wx_u32m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1275,7 +1158,6 @@ vuint32m2_t test_vnclipu_wv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
   return vnclipu_wv_u32m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1286,7 +1168,6 @@ vuint32m2_t test_vnclipu_wx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
   return vnclipu_wx_u32m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1298,7 +1179,6 @@ vuint32m4_t test_vnclipu_wv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
   return vnclipu_wv_u32m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vncvt.c
index b075e63e4a1f2..f316e1674d2ec 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vncvt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vncvt.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint8mf8_t test_vncvt_x_x_w_i8mf8 (vint16mf4_t src, size_t vl) {
   return vncvt_x_x_w_i8mf8(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint8mf4_t test_vncvt_x_x_w_i8mf4 (vint16mf2_t src, size_t vl) {
   return vncvt_x_x_w_i8mf4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint8mf2_t test_vncvt_x_x_w_i8mf2 (vint16m1_t src, size_t vl) {
   return vncvt_x_x_w_i8mf2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint8m1_t test_vncvt_x_x_w_i8m1 (vint16m2_t src, size_t vl) {
   return vncvt_x_x_w_i8m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint8m2_t test_vncvt_x_x_w_i8m2 (vint16m4_t src, size_t vl) {
   return vncvt_x_x_w_i8m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint8m4_t test_vncvt_x_x_w_i8m4 (vint16m8_t src, size_t vl) {
   return vncvt_x_x_w_i8m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vuint8mf8_t test_vncvt_x_x_w_u8mf8 (vuint16mf4_t src, size_t vl) {
   return vncvt_x_x_w_u8mf8(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vuint8mf4_t test_vncvt_x_x_w_u8mf4 (vuint16mf2_t src, size_t vl) {
   return vncvt_x_x_w_u8mf4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vuint8mf2_t test_vncvt_x_x_w_u8mf2 (vuint16m1_t src, size_t vl) {
   return vncvt_x_x_w_u8mf2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vuint8m1_t test_vncvt_x_x_w_u8m1 (vuint16m2_t src, size_t vl) {
   return vncvt_x_x_w_u8m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vuint8m2_t test_vncvt_x_x_w_u8m2 (vuint16m4_t src, size_t vl) {
   return vncvt_x_x_w_u8m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vuint8m4_t test_vncvt_x_x_w_u8m4 (vuint16m8_t src, size_t vl) {
   return vncvt_x_x_w_u8m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vint16mf4_t test_vncvt_x_x_w_i16mf4 (vint32mf2_t src, size_t vl) {
   return vncvt_x_x_w_i16mf4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vint16mf2_t test_vncvt_x_x_w_i16mf2 (vint32m1_t src, size_t vl) {
   return vncvt_x_x_w_i16mf2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vint16m1_t test_vncvt_x_x_w_i16m1 (vint32m2_t src, size_t vl) {
   return vncvt_x_x_w_i16m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vint16m2_t test_vncvt_x_x_w_i16m2 (vint32m4_t src, size_t vl) {
   return vncvt_x_x_w_i16m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vint16m4_t test_vncvt_x_x_w_i16m4 (vint32m8_t src, size_t vl) {
   return vncvt_x_x_w_i16m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vuint16mf4_t test_vncvt_x_x_w_u16mf4 (vuint32mf2_t src, size_t vl) {
   return vncvt_x_x_w_u16mf4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vuint16mf2_t test_vncvt_x_x_w_u16mf2 (vuint32m1_t src, size_t vl) {
   return vncvt_x_x_w_u16mf2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vuint16m1_t test_vncvt_x_x_w_u16m1 (vuint32m2_t src, size_t vl) {
   return vncvt_x_x_w_u16m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vuint16m2_t test_vncvt_x_x_w_u16m2 (vuint32m4_t src, size_t vl) {
   return vncvt_x_x_w_u16m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vuint16m4_t test_vncvt_x_x_w_u16m4 (vuint32m8_t src, size_t vl) {
   return vncvt_x_x_w_u16m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vint32mf2_t test_vncvt_x_x_w_i32mf2 (vint64m1_t src, size_t vl) {
   return vncvt_x_x_w_i32mf2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vint32m1_t test_vncvt_x_x_w_i32m1 (vint64m2_t src, size_t vl) {
   return vncvt_x_x_w_i32m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vint32m2_t test_vncvt_x_x_w_i32m2 (vint64m4_t src, size_t vl) {
   return vncvt_x_x_w_i32m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vint32m4_t test_vncvt_x_x_w_i32m4 (vint64m8_t src, size_t vl) {
   return vncvt_x_x_w_i32m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vuint32mf2_t test_vncvt_x_x_w_u32mf2 (vuint64m1_t src, size_t vl) {
   return vncvt_x_x_w_u32mf2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vuint32m1_t test_vncvt_x_x_w_u32m1 (vuint64m2_t src, size_t vl) {
   return vncvt_x_x_w_u32m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vuint32m2_t test_vncvt_x_x_w_u32m2 (vuint64m4_t src, size_t vl) {
   return vncvt_x_x_w_u32m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
@@ -304,7 +274,6 @@ vuint32m4_t test_vncvt_x_x_w_u32m4 (vuint64m8_t src, size_t vl) {
   return vncvt_x_x_w_u32m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -314,7 +283,6 @@ vint8mf8_t test_vncvt_x_x_w_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint1
   return vncvt_x_x_w_i8mf8_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 0, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -324,7 +292,6 @@ vint8mf4_t test_vncvt_x_x_w_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint1
   return vncvt_x_x_w_i8mf4_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 0, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -334,7 +301,6 @@ vint8mf2_t test_vncvt_x_x_w_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint1
   return vncvt_x_x_w_i8mf2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -344,7 +310,6 @@ vint8m1_t test_vncvt_x_x_w_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint16m2_
   return vncvt_x_x_w_i8m1_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 0, <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -354,7 +319,6 @@ vint8m2_t test_vncvt_x_x_w_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint16m4_
   return vncvt_x_x_w_i8m2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 0, <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -364,7 +328,6 @@ vint8m4_t test_vncvt_x_x_w_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint16m8_
   return vncvt_x_x_w_i8m4_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -374,7 +337,6 @@ vuint8mf8_t test_vncvt_x_x_w_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vui
   return vncvt_x_x_w_u8mf8_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 0, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -384,7 +346,6 @@ vuint8mf4_t test_vncvt_x_x_w_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vui
   return vncvt_x_x_w_u8mf4_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 0, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -394,7 +355,6 @@ vuint8mf2_t test_vncvt_x_x_w_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vui
   return vncvt_x_x_w_u8mf2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -404,7 +364,6 @@ vuint8m1_t test_vncvt_x_x_w_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint16
   return vncvt_x_x_w_u8m1_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 0, <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -414,7 +373,6 @@ vuint8m2_t test_vncvt_x_x_w_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vuint16
   return vncvt_x_x_w_u8m2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 0, <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -424,7 +382,6 @@ vuint8m4_t test_vncvt_x_x_w_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vuint16
   return vncvt_x_x_w_u8m4_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -434,7 +391,6 @@ vint16mf4_t test_vncvt_x_x_w_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vi
   return vncvt_x_x_w_i16mf4_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 0, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -444,7 +400,6 @@ vint16mf2_t test_vncvt_x_x_w_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vi
   return vncvt_x_x_w_i16mf2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 0, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -454,7 +409,6 @@ vint16m1_t test_vncvt_x_x_w_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint3
   return vncvt_x_x_w_i16m1_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -464,7 +418,6 @@ vint16m2_t test_vncvt_x_x_w_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint32
   return vncvt_x_x_w_i16m2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 0, <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -474,7 +427,6 @@ vint16m4_t test_vncvt_x_x_w_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint32
   return vncvt_x_x_w_i16m4_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -484,7 +436,6 @@ vuint16mf4_t test_vncvt_x_x_w_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff,
   return vncvt_x_x_w_u16mf4_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 0, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -494,7 +445,6 @@ vuint16mf2_t test_vncvt_x_x_w_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff,
   return vncvt_x_x_w_u16mf2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 0, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -504,7 +454,6 @@ vuint16m1_t test_vncvt_x_x_w_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vui
   return vncvt_x_x_w_u16m1_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -514,7 +463,6 @@ vuint16m2_t test_vncvt_x_x_w_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuin
   return vncvt_x_x_w_u16m2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 0, <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -524,7 +472,6 @@ vuint16m4_t test_vncvt_x_x_w_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuin
   return vncvt_x_x_w_u16m4_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -534,7 +481,6 @@ vint32mf2_t test_vncvt_x_x_w_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vi
   return vncvt_x_x_w_i32mf2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 0, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -544,7 +490,6 @@ vint32m1_t test_vncvt_x_x_w_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint6
   return vncvt_x_x_w_i32m1_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 0, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -554,7 +499,6 @@ vint32m2_t test_vncvt_x_x_w_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint6
   return vncvt_x_x_w_i32m2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -564,7 +508,6 @@ vint32m4_t test_vncvt_x_x_w_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint64
   return vncvt_x_x_w_i32m4_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -574,7 +517,6 @@ vuint32mf2_t test_vncvt_x_x_w_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff,
   return vncvt_x_x_w_u32mf2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 0, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -584,7 +526,6 @@ vuint32m1_t test_vncvt_x_x_w_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vui
   return vncvt_x_x_w_u32m1_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 0, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -594,7 +535,6 @@ vuint32m2_t test_vncvt_x_x_w_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vui
   return vncvt_x_x_w_u32m2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vneg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vneg.c
index 847667a183338..651d25d7a9361 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vneg.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vneg.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vint8mf8_t test_vneg_v_i8mf8 (vint8mf8_t op1, size_t vl) {
   return vneg_v_i8mf8(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
@@ -25,7 +23,6 @@ vint8mf4_t test_vneg_v_i8mf4 (vint8mf4_t op1, size_t vl) {
   return vneg_v_i8mf4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
@@ -35,7 +32,6 @@ vint8mf2_t test_vneg_v_i8mf2 (vint8mf2_t op1, size_t vl) {
   return vneg_v_i8mf2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
@@ -45,7 +41,6 @@ vint8m1_t test_vneg_v_i8m1 (vint8m1_t op1, size_t vl) {
   return vneg_v_i8m1(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
@@ -55,7 +50,6 @@ vint8m2_t test_vneg_v_i8m2 (vint8m2_t op1, size_t vl) {
   return vneg_v_i8m2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
@@ -65,7 +59,6 @@ vint8m4_t test_vneg_v_i8m4 (vint8m4_t op1, size_t vl) {
   return vneg_v_i8m4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
@@ -75,7 +68,6 @@ vint8m8_t test_vneg_v_i8m8 (vint8m8_t op1, size_t vl) {
   return vneg_v_i8m8(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
@@ -85,7 +77,6 @@ vint16mf4_t test_vneg_v_i16mf4 (vint16mf4_t op1, size_t vl) {
   return vneg_v_i16mf4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
@@ -95,7 +86,6 @@ vint16mf2_t test_vneg_v_i16mf2 (vint16mf2_t op1, size_t vl) {
   return vneg_v_i16mf2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
@@ -105,7 +95,6 @@ vint16m1_t test_vneg_v_i16m1 (vint16m1_t op1, size_t vl) {
   return vneg_v_i16m1(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
@@ -115,7 +104,6 @@ vint16m2_t test_vneg_v_i16m2 (vint16m2_t op1, size_t vl) {
   return vneg_v_i16m2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
@@ -125,7 +113,6 @@ vint16m4_t test_vneg_v_i16m4 (vint16m4_t op1, size_t vl) {
   return vneg_v_i16m4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
@@ -135,7 +122,6 @@ vint16m8_t test_vneg_v_i16m8 (vint16m8_t op1, size_t vl) {
   return vneg_v_i16m8(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
@@ -145,7 +131,6 @@ vint32mf2_t test_vneg_v_i32mf2 (vint32mf2_t op1, size_t vl) {
   return vneg_v_i32mf2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
@@ -155,7 +140,6 @@ vint32m1_t test_vneg_v_i32m1 (vint32m1_t op1, size_t vl) {
   return vneg_v_i32m1(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
@@ -165,7 +149,6 @@ vint32m2_t test_vneg_v_i32m2 (vint32m2_t op1, size_t vl) {
   return vneg_v_i32m2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
@@ -175,7 +158,6 @@ vint32m4_t test_vneg_v_i32m4 (vint32m4_t op1, size_t vl) {
   return vneg_v_i32m4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
@@ -185,7 +167,6 @@ vint32m8_t test_vneg_v_i32m8 (vint32m8_t op1, size_t vl) {
   return vneg_v_i32m8(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
@@ -195,7 +176,6 @@ vint64m1_t test_vneg_v_i64m1 (vint64m1_t op1, size_t vl) {
   return vneg_v_i64m1(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
@@ -205,7 +185,6 @@ vint64m2_t test_vneg_v_i64m2 (vint64m2_t op1, size_t vl) {
   return vneg_v_i64m2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
@@ -215,7 +194,6 @@ vint64m4_t test_vneg_v_i64m4 (vint64m4_t op1, size_t vl) {
   return vneg_v_i64m4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
@@ -225,7 +203,6 @@ vint64m8_t test_vneg_v_i64m8 (vint64m8_t op1, size_t vl) {
   return vneg_v_i64m8(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -235,7 +212,6 @@ vint8mf8_t test_vneg_v_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t
   return vneg_v_i8mf8_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 0, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -245,7 +221,6 @@ vint8mf4_t test_vneg_v_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t
   return vneg_v_i8mf4_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 0, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -255,7 +230,6 @@ vint8mf2_t test_vneg_v_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t
   return vneg_v_i8mf2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -265,7 +239,6 @@ vint8m1_t test_vneg_v_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1,
   return vneg_v_i8m1_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 0, <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -275,7 +248,6 @@ vint8m2_t test_vneg_v_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1,
   return vneg_v_i8m2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 0, <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -285,7 +257,6 @@ vint8m4_t test_vneg_v_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1,
   return vneg_v_i8m4_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 0, <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -295,7 +266,6 @@ vint8m8_t test_vneg_v_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1,
   return vneg_v_i8m8_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -305,7 +275,6 @@ vint16mf4_t test_vneg_v_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16m
   return vneg_v_i16mf4_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 0, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -315,7 +284,6 @@ vint16mf2_t test_vneg_v_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16m
   return vneg_v_i16mf2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 0, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -325,7 +293,6 @@ vint16m1_t test_vneg_v_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t
   return vneg_v_i16m1_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -335,7 +302,6 @@ vint16m2_t test_vneg_v_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
   return vneg_v_i16m2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 0, <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -345,7 +311,6 @@ vint16m4_t test_vneg_v_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
   return vneg_v_i16m4_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 0, <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -355,7 +320,6 @@ vint16m8_t test_vneg_v_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
   return vneg_v_i16m8_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -365,7 +329,6 @@ vint32mf2_t test_vneg_v_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32m
   return vneg_v_i32mf2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 0, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -375,7 +338,6 @@ vint32m1_t test_vneg_v_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t
   return vneg_v_i32m1_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 0, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -385,7 +347,6 @@ vint32m2_t test_vneg_v_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t
   return vneg_v_i32m2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -395,7 +356,6 @@ vint32m4_t test_vneg_v_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
   return vneg_v_i32m4_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 0, <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -405,7 +365,6 @@ vint32m8_t test_vneg_v_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
   return vneg_v_i32m8_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -415,7 +374,6 @@ vint64m1_t test_vneg_v_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t
   return vneg_v_i64m1_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 0, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -425,7 +383,6 @@ vint64m2_t test_vneg_v_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t
   return vneg_v_i64m2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 0, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -435,7 +392,6 @@ vint64m4_t test_vneg_v_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t
   return vneg_v_i64m4_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnmsac.c
index 722c8a7348a19..52aea8a954556 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnmsac.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnmsac.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint8mf8_t test_vnmsac_vv_i8mf8(vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2,
   return vnmsac_vv_i8mf8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint8mf8_t test_vnmsac_vx_i8mf8(vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size
   return vnmsac_vx_i8mf8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint8mf4_t test_vnmsac_vv_i8mf4(vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2,
   return vnmsac_vv_i8mf4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint8mf4_t test_vnmsac_vx_i8mf4(vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size
   return vnmsac_vx_i8mf4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint8mf2_t test_vnmsac_vv_i8mf2(vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2,
   return vnmsac_vv_i8mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint8mf2_t test_vnmsac_vx_i8mf2(vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size
   return vnmsac_vx_i8mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vint8m1_t test_vnmsac_vv_i8m1(vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_
   return vnmsac_vv_i8m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vint8m1_t test_vnmsac_vx_i8m1(vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t v
   return vnmsac_vx_i8m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vint8m2_t test_vnmsac_vv_i8m2(vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_
   return vnmsac_vv_i8m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vint8m2_t test_vnmsac_vx_i8m2(vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t v
   return vnmsac_vx_i8m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vint8m4_t test_vnmsac_vv_i8m4(vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_
   return vnmsac_vv_i8m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vint8m4_t test_vnmsac_vx_i8m4(vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t v
   return vnmsac_vx_i8m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vint8m8_t test_vnmsac_vv_i8m8(vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_
   return vnmsac_vv_i8m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vint8m8_t test_vnmsac_vx_i8m8(vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t v
   return vnmsac_vx_i8m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vint16mf4_t test_vnmsac_vv_i16mf4(vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t
   return vnmsac_vv_i16mf4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vint16mf4_t test_vnmsac_vx_i16mf4(vint16mf4_t acc, int16_t op1, vint16mf4_t op2,
   return vnmsac_vx_i16mf4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vint16mf2_t test_vnmsac_vv_i16mf2(vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t
   return vnmsac_vv_i16mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vint16mf2_t test_vnmsac_vx_i16mf2(vint16mf2_t acc, int16_t op1, vint16mf2_t op2,
   return vnmsac_vx_i16mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vint16m1_t test_vnmsac_vv_i16m1(vint16m1_t acc, vint16m1_t op1, vint16m1_t op2,
   return vnmsac_vv_i16m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vint16m1_t test_vnmsac_vx_i16m1(vint16m1_t acc, int16_t op1, vint16m1_t op2, siz
   return vnmsac_vx_i16m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vint16m2_t test_vnmsac_vv_i16m2(vint16m2_t acc, vint16m2_t op1, vint16m2_t op2,
   return vnmsac_vv_i16m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vint16m2_t test_vnmsac_vx_i16m2(vint16m2_t acc, int16_t op1, vint16m2_t op2, siz
   return vnmsac_vx_i16m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vint16m4_t test_vnmsac_vv_i16m4(vint16m4_t acc, vint16m4_t op1, vint16m4_t op2,
   return vnmsac_vv_i16m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vint16m4_t test_vnmsac_vx_i16m4(vint16m4_t acc, int16_t op1, vint16m4_t op2, siz
   return vnmsac_vx_i16m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vint16m8_t test_vnmsac_vv_i16m8(vint16m8_t acc, vint16m8_t op1, vint16m8_t op2,
   return vnmsac_vv_i16m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vint16m8_t test_vnmsac_vx_i16m8(vint16m8_t acc, int16_t op1, vint16m8_t op2, siz
   return vnmsac_vx_i16m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vint32mf2_t test_vnmsac_vv_i32mf2(vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t
   return vnmsac_vv_i32mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vint32mf2_t test_vnmsac_vx_i32mf2(vint32mf2_t acc, int32_t op1, vint32mf2_t op2,
   return vnmsac_vx_i32mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vint32m1_t test_vnmsac_vv_i32m1(vint32m1_t acc, vint32m1_t op1, vint32m1_t op2,
   return vnmsac_vv_i32m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -304,7 +274,6 @@ vint32m1_t test_vnmsac_vx_i32m1(vint32m1_t acc, int32_t op1, vint32m1_t op2, siz
   return vnmsac_vx_i32m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -314,7 +283,6 @@ vint32m2_t test_vnmsac_vv_i32m2(vint32m2_t acc, vint32m2_t op1, vint32m2_t op2,
   return vnmsac_vv_i32m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -324,7 +292,6 @@ vint32m2_t test_vnmsac_vx_i32m2(vint32m2_t acc, int32_t op1, vint32m2_t op2, siz
   return vnmsac_vx_i32m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -334,7 +301,6 @@ vint32m4_t test_vnmsac_vv_i32m4(vint32m4_t acc, vint32m4_t op1, vint32m4_t op2,
   return vnmsac_vv_i32m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -344,7 +310,6 @@ vint32m4_t test_vnmsac_vx_i32m4(vint32m4_t acc, int32_t op1, vint32m4_t op2, siz
   return vnmsac_vx_i32m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -354,7 +319,6 @@ vint32m8_t test_vnmsac_vv_i32m8(vint32m8_t acc, vint32m8_t op1, vint32m8_t op2,
   return vnmsac_vv_i32m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -364,7 +328,6 @@ vint32m8_t test_vnmsac_vx_i32m8(vint32m8_t acc, int32_t op1, vint32m8_t op2, siz
   return vnmsac_vx_i32m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -374,7 +337,6 @@ vint64m1_t test_vnmsac_vv_i64m1(vint64m1_t acc, vint64m1_t op1, vint64m1_t op2,
   return vnmsac_vv_i64m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -384,7 +346,6 @@ vint64m1_t test_vnmsac_vx_i64m1(vint64m1_t acc, int64_t op1, vint64m1_t op2, siz
   return vnmsac_vx_i64m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -394,7 +355,6 @@ vint64m2_t test_vnmsac_vv_i64m2(vint64m2_t acc, vint64m2_t op1, vint64m2_t op2,
   return vnmsac_vv_i64m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -404,7 +364,6 @@ vint64m2_t test_vnmsac_vx_i64m2(vint64m2_t acc, int64_t op1, vint64m2_t op2, siz
   return vnmsac_vx_i64m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -414,7 +373,6 @@ vint64m4_t test_vnmsac_vv_i64m4(vint64m4_t acc, vint64m4_t op1, vint64m4_t op2,
   return vnmsac_vv_i64m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -424,7 +382,6 @@ vint64m4_t test_vnmsac_vx_i64m4(vint64m4_t acc, int64_t op1, vint64m4_t op2, siz
   return vnmsac_vx_i64m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -434,7 +391,6 @@ vint64m8_t test_vnmsac_vv_i64m8(vint64m8_t acc, vint64m8_t op1, vint64m8_t op2,
   return vnmsac_vv_i64m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -444,7 +400,6 @@ vint64m8_t test_vnmsac_vx_i64m8(vint64m8_t acc, int64_t op1, vint64m8_t op2, siz
   return vnmsac_vx_i64m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -454,7 +409,6 @@ vuint8mf8_t test_vnmsac_vv_u8mf8(vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t o
   return vnmsac_vv_u8mf8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -464,7 +418,6 @@ vuint8mf8_t test_vnmsac_vx_u8mf8(vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2,
   return vnmsac_vx_u8mf8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -474,7 +427,6 @@ vuint8mf4_t test_vnmsac_vv_u8mf4(vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t o
   return vnmsac_vv_u8mf4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -484,7 +436,6 @@ vuint8mf4_t test_vnmsac_vx_u8mf4(vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2,
   return vnmsac_vx_u8mf4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -494,7 +445,6 @@ vuint8mf2_t test_vnmsac_vv_u8mf2(vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t o
   return vnmsac_vv_u8mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -504,7 +454,6 @@ vuint8mf2_t test_vnmsac_vx_u8mf2(vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2,
   return vnmsac_vx_u8mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -514,7 +463,6 @@ vuint8m1_t test_vnmsac_vv_u8m1(vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, s
   return vnmsac_vv_u8m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -524,7 +472,6 @@ vuint8m1_t test_vnmsac_vx_u8m1(vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size
   return vnmsac_vx_u8m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -534,7 +481,6 @@ vuint8m2_t test_vnmsac_vv_u8m2(vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, s
   return vnmsac_vv_u8m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -544,7 +490,6 @@ vuint8m2_t test_vnmsac_vx_u8m2(vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size
   return vnmsac_vx_u8m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -554,7 +499,6 @@ vuint8m4_t test_vnmsac_vv_u8m4(vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, s
   return vnmsac_vv_u8m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -564,7 +508,6 @@ vuint8m4_t test_vnmsac_vx_u8m4(vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size
   return vnmsac_vx_u8m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -574,7 +517,6 @@ vuint8m8_t test_vnmsac_vv_u8m8(vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, s
   return vnmsac_vv_u8m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -584,7 +526,6 @@ vuint8m8_t test_vnmsac_vx_u8m8(vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size
   return vnmsac_vx_u8m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -594,7 +535,6 @@ vuint16mf4_t test_vnmsac_vv_u16mf4(vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf
   return vnmsac_vv_u16mf4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -604,7 +544,6 @@ vuint16mf4_t test_vnmsac_vx_u16mf4(vuint16mf4_t acc, uint16_t op1, vuint16mf4_t
   return vnmsac_vx_u16mf4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -614,7 +553,6 @@ vuint16mf2_t test_vnmsac_vv_u16mf2(vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf
   return vnmsac_vv_u16mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -624,7 +562,6 @@ vuint16mf2_t test_vnmsac_vx_u16mf2(vuint16mf2_t acc, uint16_t op1, vuint16mf2_t
   return vnmsac_vx_u16mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -634,7 +571,6 @@ vuint16m1_t test_vnmsac_vv_u16m1(vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t o
   return vnmsac_vv_u16m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -644,7 +580,6 @@ vuint16m1_t test_vnmsac_vx_u16m1(vuint16m1_t acc, uint16_t op1, vuint16m1_t op2,
   return vnmsac_vx_u16m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -654,7 +589,6 @@ vuint16m2_t test_vnmsac_vv_u16m2(vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t o
   return vnmsac_vv_u16m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -664,7 +598,6 @@ vuint16m2_t test_vnmsac_vx_u16m2(vuint16m2_t acc, uint16_t op1, vuint16m2_t op2,
   return vnmsac_vx_u16m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -674,7 +607,6 @@ vuint16m4_t test_vnmsac_vv_u16m4(vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t o
   return vnmsac_vv_u16m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -684,7 +616,6 @@ vuint16m4_t test_vnmsac_vx_u16m4(vuint16m4_t acc, uint16_t op1, vuint16m4_t op2,
   return vnmsac_vx_u16m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -694,7 +625,6 @@ vuint16m8_t test_vnmsac_vv_u16m8(vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t o
   return vnmsac_vv_u16m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -704,7 +634,6 @@ vuint16m8_t test_vnmsac_vx_u16m8(vuint16m8_t acc, uint16_t op1, vuint16m8_t op2,
   return vnmsac_vx_u16m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -714,7 +643,6 @@ vuint32mf2_t test_vnmsac_vv_u32mf2(vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf
   return vnmsac_vv_u32mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -724,7 +652,6 @@ vuint32mf2_t test_vnmsac_vx_u32mf2(vuint32mf2_t acc, uint32_t op1, vuint32mf2_t
   return vnmsac_vx_u32mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -734,7 +661,6 @@ vuint32m1_t test_vnmsac_vv_u32m1(vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t o
   return vnmsac_vv_u32m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -744,7 +670,6 @@ vuint32m1_t test_vnmsac_vx_u32m1(vuint32m1_t acc, uint32_t op1, vuint32m1_t op2,
   return vnmsac_vx_u32m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -754,7 +679,6 @@ vuint32m2_t test_vnmsac_vv_u32m2(vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t o
   return vnmsac_vv_u32m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -764,7 +688,6 @@ vuint32m2_t test_vnmsac_vx_u32m2(vuint32m2_t acc, uint32_t op1, vuint32m2_t op2,
   return vnmsac_vx_u32m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -774,7 +697,6 @@ vuint32m4_t test_vnmsac_vv_u32m4(vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t o
   return vnmsac_vv_u32m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -784,7 +706,6 @@ vuint32m4_t test_vnmsac_vx_u32m4(vuint32m4_t acc, uint32_t op1, vuint32m4_t op2,
   return vnmsac_vx_u32m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -794,7 +715,6 @@ vuint32m8_t test_vnmsac_vv_u32m8(vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t o
   return vnmsac_vv_u32m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -804,7 +724,6 @@ vuint32m8_t test_vnmsac_vx_u32m8(vuint32m8_t acc, uint32_t op1, vuint32m8_t op2,
   return vnmsac_vx_u32m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -814,7 +733,6 @@ vuint64m1_t test_vnmsac_vv_u64m1(vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t o
   return vnmsac_vv_u64m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -824,7 +742,6 @@ vuint64m1_t test_vnmsac_vx_u64m1(vuint64m1_t acc, uint64_t op1, vuint64m1_t op2,
   return vnmsac_vx_u64m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -834,7 +751,6 @@ vuint64m2_t test_vnmsac_vv_u64m2(vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t o
   return vnmsac_vv_u64m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -844,7 +760,6 @@ vuint64m2_t test_vnmsac_vx_u64m2(vuint64m2_t acc, uint64_t op1, vuint64m2_t op2,
   return vnmsac_vx_u64m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -854,7 +769,6 @@ vuint64m4_t test_vnmsac_vv_u64m4(vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t o
   return vnmsac_vv_u64m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -864,7 +778,6 @@ vuint64m4_t test_vnmsac_vx_u64m4(vuint64m4_t acc, uint64_t op1, vuint64m4_t op2,
   return vnmsac_vx_u64m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -874,7 +787,6 @@ vuint64m8_t test_vnmsac_vv_u64m8(vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t o
   return vnmsac_vv_u64m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -884,7 +796,6 @@ vuint64m8_t test_vnmsac_vx_u64m8(vuint64m8_t acc, uint64_t op1, vuint64m8_t op2,
   return vnmsac_vx_u64m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -894,7 +805,6 @@ vint8mf8_t test_vnmsac_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1
   return vnmsac_vv_i8mf8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -904,7 +814,6 @@ vint8mf8_t test_vnmsac_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vi
   return vnmsac_vx_i8mf8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -914,7 +823,6 @@ vint8mf4_t test_vnmsac_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1
   return vnmsac_vv_i8mf4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -924,7 +832,6 @@ vint8mf4_t test_vnmsac_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vi
   return vnmsac_vx_i8mf4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -934,7 +841,6 @@ vint8mf2_t test_vnmsac_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1
   return vnmsac_vv_i8mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -944,7 +850,6 @@ vint8mf2_t test_vnmsac_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vi
   return vnmsac_vx_i8mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -954,7 +859,6 @@ vint8m1_t test_vnmsac_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vin
   return vnmsac_vv_i8m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -964,7 +868,6 @@ vint8m1_t test_vnmsac_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m
   return vnmsac_vx_i8m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -974,7 +877,6 @@ vint8m2_t test_vnmsac_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vin
   return vnmsac_vv_i8m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -984,7 +886,6 @@ vint8m2_t test_vnmsac_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m
   return vnmsac_vx_i8m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -994,7 +895,6 @@ vint8m4_t test_vnmsac_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vin
   return vnmsac_vv_i8m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1004,7 +904,6 @@ vint8m4_t test_vnmsac_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m
   return vnmsac_vx_i8m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1014,7 +913,6 @@ vint8m8_t test_vnmsac_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vin
   return vnmsac_vv_i8m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1024,7 +922,6 @@ vint8m8_t test_vnmsac_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m
   return vnmsac_vx_i8m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1034,7 +931,6 @@ vint16mf4_t test_vnmsac_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t
   return vnmsac_vv_i16mf4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1044,7 +940,6 @@ vint16mf4_t test_vnmsac_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1
   return vnmsac_vx_i16mf4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1054,7 +949,6 @@ vint16mf2_t test_vnmsac_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t
   return vnmsac_vv_i16mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1064,7 +958,6 @@ vint16mf2_t test_vnmsac_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1
   return vnmsac_vx_i16mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1074,7 +967,6 @@ vint16m1_t test_vnmsac_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1
   return vnmsac_vv_i16m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1084,7 +976,6 @@ vint16m1_t test_vnmsac_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, v
   return vnmsac_vx_i16m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1094,7 +985,6 @@ vint16m2_t test_vnmsac_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1,
   return vnmsac_vv_i16m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1104,7 +994,6 @@ vint16m2_t test_vnmsac_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vi
   return vnmsac_vx_i16m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1114,7 +1003,6 @@ vint16m4_t test_vnmsac_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1,
   return vnmsac_vv_i16m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1124,7 +1012,6 @@ vint16m4_t test_vnmsac_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vi
   return vnmsac_vx_i16m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1134,7 +1021,6 @@ vint16m8_t test_vnmsac_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1,
   return vnmsac_vv_i16m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1144,7 +1030,6 @@ vint16m8_t test_vnmsac_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vi
   return vnmsac_vx_i16m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1154,7 +1039,6 @@ vint32mf2_t test_vnmsac_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t
   return vnmsac_vv_i32mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1164,7 +1048,6 @@ vint32mf2_t test_vnmsac_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1
   return vnmsac_vx_i32mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1174,7 +1057,6 @@ vint32m1_t test_vnmsac_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1
   return vnmsac_vv_i32m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1184,7 +1066,6 @@ vint32m1_t test_vnmsac_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, v
   return vnmsac_vx_i32m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1194,7 +1075,6 @@ vint32m2_t test_vnmsac_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1
   return vnmsac_vv_i32m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1204,7 +1084,6 @@ vint32m2_t test_vnmsac_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, v
   return vnmsac_vx_i32m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1214,7 +1093,6 @@ vint32m4_t test_vnmsac_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1,
   return vnmsac_vv_i32m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1224,7 +1102,6 @@ vint32m4_t test_vnmsac_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vi
   return vnmsac_vx_i32m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1234,7 +1111,6 @@ vint32m8_t test_vnmsac_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1,
   return vnmsac_vv_i32m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1244,7 +1120,6 @@ vint32m8_t test_vnmsac_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vi
   return vnmsac_vx_i32m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1254,7 +1129,6 @@ vint64m1_t test_vnmsac_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1
   return vnmsac_vv_i64m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1264,7 +1138,6 @@ vint64m1_t test_vnmsac_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, v
   return vnmsac_vx_i64m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1274,7 +1147,6 @@ vint64m2_t test_vnmsac_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1
   return vnmsac_vv_i64m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1284,7 +1156,6 @@ vint64m2_t test_vnmsac_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, v
   return vnmsac_vx_i64m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1294,7 +1165,6 @@ vint64m4_t test_vnmsac_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1
   return vnmsac_vv_i64m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1304,7 +1174,6 @@ vint64m4_t test_vnmsac_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, v
   return vnmsac_vx_i64m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1314,7 +1183,6 @@ vint64m8_t test_vnmsac_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1,
   return vnmsac_vv_i64m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1324,7 +1192,6 @@ vint64m8_t test_vnmsac_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vi
   return vnmsac_vx_i64m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1334,7 +1201,6 @@ vuint8mf8_t test_vnmsac_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t
   return vnmsac_vv_u8mf8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1344,7 +1210,6 @@ vuint8mf8_t test_vnmsac_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1,
   return vnmsac_vx_u8mf8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1354,7 +1219,6 @@ vuint8mf4_t test_vnmsac_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t
   return vnmsac_vv_u8mf4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1364,7 +1228,6 @@ vuint8mf4_t test_vnmsac_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1,
   return vnmsac_vx_u8mf4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1374,7 +1237,6 @@ vuint8mf2_t test_vnmsac_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t
   return vnmsac_vv_u8mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1384,7 +1246,6 @@ vuint8mf2_t test_vnmsac_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1,
   return vnmsac_vx_u8mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1394,7 +1255,6 @@ vuint8m1_t test_vnmsac_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1,
   return vnmsac_vv_u8m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1404,7 +1264,6 @@ vuint8m1_t test_vnmsac_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vui
   return vnmsac_vx_u8m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1414,7 +1273,6 @@ vuint8m2_t test_vnmsac_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1,
   return vnmsac_vv_u8m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1424,7 +1282,6 @@ vuint8m2_t test_vnmsac_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vui
   return vnmsac_vx_u8m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1434,7 +1291,6 @@ vuint8m4_t test_vnmsac_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1,
   return vnmsac_vv_u8m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1444,7 +1300,6 @@ vuint8m4_t test_vnmsac_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vui
   return vnmsac_vx_u8m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1454,7 +1309,6 @@ vuint8m8_t test_vnmsac_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1,
   return vnmsac_vv_u8m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1464,7 +1318,6 @@ vuint8m8_t test_vnmsac_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vui
   return vnmsac_vx_u8m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1474,7 +1327,6 @@ vuint16mf4_t test_vnmsac_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf
   return vnmsac_vv_u16mf4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1484,7 +1336,6 @@ vuint16mf4_t test_vnmsac_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t
   return vnmsac_vx_u16mf4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1494,7 +1345,6 @@ vuint16mf2_t test_vnmsac_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf
   return vnmsac_vv_u16mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1504,7 +1354,6 @@ vuint16mf2_t test_vnmsac_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t
   return vnmsac_vx_u16mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1514,7 +1363,6 @@ vuint16m1_t test_vnmsac_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t
   return vnmsac_vv_u16m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1524,7 +1372,6 @@ vuint16m1_t test_vnmsac_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1
   return vnmsac_vx_u16m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1534,7 +1381,6 @@ vuint16m2_t test_vnmsac_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t o
   return vnmsac_vv_u16m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1544,7 +1390,6 @@ vuint16m2_t test_vnmsac_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1,
   return vnmsac_vx_u16m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1554,7 +1399,6 @@ vuint16m4_t test_vnmsac_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t o
   return vnmsac_vv_u16m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1564,7 +1408,6 @@ vuint16m4_t test_vnmsac_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1,
   return vnmsac_vx_u16m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1574,7 +1417,6 @@ vuint16m8_t test_vnmsac_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t o
   return vnmsac_vv_u16m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1584,7 +1426,6 @@ vuint16m8_t test_vnmsac_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1,
   return vnmsac_vx_u16m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1594,7 +1435,6 @@ vuint32mf2_t test_vnmsac_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf
   return vnmsac_vv_u32mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1604,7 +1444,6 @@ vuint32mf2_t test_vnmsac_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t
   return vnmsac_vx_u32mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1614,7 +1453,6 @@ vuint32m1_t test_vnmsac_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t
   return vnmsac_vv_u32m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1624,7 +1462,6 @@ vuint32m1_t test_vnmsac_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1
   return vnmsac_vx_u32m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1634,7 +1471,6 @@ vuint32m2_t test_vnmsac_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t
   return vnmsac_vv_u32m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1644,7 +1480,6 @@ vuint32m2_t test_vnmsac_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1
   return vnmsac_vx_u32m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1654,7 +1489,6 @@ vuint32m4_t test_vnmsac_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t o
   return vnmsac_vv_u32m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1664,7 +1498,6 @@ vuint32m4_t test_vnmsac_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1,
   return vnmsac_vx_u32m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1674,7 +1507,6 @@ vuint32m8_t test_vnmsac_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t o
   return vnmsac_vv_u32m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1684,7 +1516,6 @@ vuint32m8_t test_vnmsac_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1,
   return vnmsac_vx_u32m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1694,7 +1525,6 @@ vuint64m1_t test_vnmsac_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t
   return vnmsac_vv_u64m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1704,7 +1534,6 @@ vuint64m1_t test_vnmsac_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1
   return vnmsac_vx_u64m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1714,7 +1543,6 @@ vuint64m2_t test_vnmsac_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t
   return vnmsac_vv_u64m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1724,7 +1552,6 @@ vuint64m2_t test_vnmsac_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1
   return vnmsac_vx_u64m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1734,7 +1561,6 @@ vuint64m4_t test_vnmsac_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t
   return vnmsac_vv_u64m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1744,7 +1570,6 @@ vuint64m4_t test_vnmsac_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1
   return vnmsac_vx_u64m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1754,7 +1579,6 @@ vuint64m8_t test_vnmsac_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t o
   return vnmsac_vv_u64m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnmsub.c
index 619a5ce30106d..e8fae27f84a15 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnmsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnmsub.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint8mf8_t test_vnmsub_vv_i8mf8(vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2,
   return vnmsub_vv_i8mf8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint8mf8_t test_vnmsub_vx_i8mf8(vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size
   return vnmsub_vx_i8mf8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint8mf4_t test_vnmsub_vv_i8mf4(vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2,
   return vnmsub_vv_i8mf4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint8mf4_t test_vnmsub_vx_i8mf4(vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size
   return vnmsub_vx_i8mf4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint8mf2_t test_vnmsub_vv_i8mf2(vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2,
   return vnmsub_vv_i8mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint8mf2_t test_vnmsub_vx_i8mf2(vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size
   return vnmsub_vx_i8mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vint8m1_t test_vnmsub_vv_i8m1(vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_
   return vnmsub_vv_i8m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vint8m1_t test_vnmsub_vx_i8m1(vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t v
   return vnmsub_vx_i8m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vint8m2_t test_vnmsub_vv_i8m2(vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_
   return vnmsub_vv_i8m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vint8m2_t test_vnmsub_vx_i8m2(vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t v
   return vnmsub_vx_i8m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vint8m4_t test_vnmsub_vv_i8m4(vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_
   return vnmsub_vv_i8m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vint8m4_t test_vnmsub_vx_i8m4(vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t v
   return vnmsub_vx_i8m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vint8m8_t test_vnmsub_vv_i8m8(vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_
   return vnmsub_vv_i8m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vint8m8_t test_vnmsub_vx_i8m8(vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t v
   return vnmsub_vx_i8m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vint16mf4_t test_vnmsub_vv_i16mf4(vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t
   return vnmsub_vv_i16mf4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vint16mf4_t test_vnmsub_vx_i16mf4(vint16mf4_t acc, int16_t op1, vint16mf4_t op2,
   return vnmsub_vx_i16mf4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vint16mf2_t test_vnmsub_vv_i16mf2(vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t
   return vnmsub_vv_i16mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vint16mf2_t test_vnmsub_vx_i16mf2(vint16mf2_t acc, int16_t op1, vint16mf2_t op2,
   return vnmsub_vx_i16mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vint16m1_t test_vnmsub_vv_i16m1(vint16m1_t acc, vint16m1_t op1, vint16m1_t op2,
   return vnmsub_vv_i16m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vint16m1_t test_vnmsub_vx_i16m1(vint16m1_t acc, int16_t op1, vint16m1_t op2, siz
   return vnmsub_vx_i16m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vint16m2_t test_vnmsub_vv_i16m2(vint16m2_t acc, vint16m2_t op1, vint16m2_t op2,
   return vnmsub_vv_i16m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vint16m2_t test_vnmsub_vx_i16m2(vint16m2_t acc, int16_t op1, vint16m2_t op2, siz
   return vnmsub_vx_i16m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vint16m4_t test_vnmsub_vv_i16m4(vint16m4_t acc, vint16m4_t op1, vint16m4_t op2,
   return vnmsub_vv_i16m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vint16m4_t test_vnmsub_vx_i16m4(vint16m4_t acc, int16_t op1, vint16m4_t op2, siz
   return vnmsub_vx_i16m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vint16m8_t test_vnmsub_vv_i16m8(vint16m8_t acc, vint16m8_t op1, vint16m8_t op2,
   return vnmsub_vv_i16m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vint16m8_t test_vnmsub_vx_i16m8(vint16m8_t acc, int16_t op1, vint16m8_t op2, siz
   return vnmsub_vx_i16m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vint32mf2_t test_vnmsub_vv_i32mf2(vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t
   return vnmsub_vv_i32mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vint32mf2_t test_vnmsub_vx_i32mf2(vint32mf2_t acc, int32_t op1, vint32mf2_t op2,
   return vnmsub_vx_i32mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vint32m1_t test_vnmsub_vv_i32m1(vint32m1_t acc, vint32m1_t op1, vint32m1_t op2,
   return vnmsub_vv_i32m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -304,7 +274,6 @@ vint32m1_t test_vnmsub_vx_i32m1(vint32m1_t acc, int32_t op1, vint32m1_t op2, siz
   return vnmsub_vx_i32m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -314,7 +283,6 @@ vint32m2_t test_vnmsub_vv_i32m2(vint32m2_t acc, vint32m2_t op1, vint32m2_t op2,
   return vnmsub_vv_i32m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -324,7 +292,6 @@ vint32m2_t test_vnmsub_vx_i32m2(vint32m2_t acc, int32_t op1, vint32m2_t op2, siz
   return vnmsub_vx_i32m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -334,7 +301,6 @@ vint32m4_t test_vnmsub_vv_i32m4(vint32m4_t acc, vint32m4_t op1, vint32m4_t op2,
   return vnmsub_vv_i32m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -344,7 +310,6 @@ vint32m4_t test_vnmsub_vx_i32m4(vint32m4_t acc, int32_t op1, vint32m4_t op2, siz
   return vnmsub_vx_i32m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -354,7 +319,6 @@ vint32m8_t test_vnmsub_vv_i32m8(vint32m8_t acc, vint32m8_t op1, vint32m8_t op2,
   return vnmsub_vv_i32m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -364,7 +328,6 @@ vint32m8_t test_vnmsub_vx_i32m8(vint32m8_t acc, int32_t op1, vint32m8_t op2, siz
   return vnmsub_vx_i32m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -374,7 +337,6 @@ vint64m1_t test_vnmsub_vv_i64m1(vint64m1_t acc, vint64m1_t op1, vint64m1_t op2,
   return vnmsub_vv_i64m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -384,7 +346,6 @@ vint64m1_t test_vnmsub_vx_i64m1(vint64m1_t acc, int64_t op1, vint64m1_t op2, siz
   return vnmsub_vx_i64m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -394,7 +355,6 @@ vint64m2_t test_vnmsub_vv_i64m2(vint64m2_t acc, vint64m2_t op1, vint64m2_t op2,
   return vnmsub_vv_i64m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -404,7 +364,6 @@ vint64m2_t test_vnmsub_vx_i64m2(vint64m2_t acc, int64_t op1, vint64m2_t op2, siz
   return vnmsub_vx_i64m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -414,7 +373,6 @@ vint64m4_t test_vnmsub_vv_i64m4(vint64m4_t acc, vint64m4_t op1, vint64m4_t op2,
   return vnmsub_vv_i64m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -424,7 +382,6 @@ vint64m4_t test_vnmsub_vx_i64m4(vint64m4_t acc, int64_t op1, vint64m4_t op2, siz
   return vnmsub_vx_i64m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -434,7 +391,6 @@ vint64m8_t test_vnmsub_vv_i64m8(vint64m8_t acc, vint64m8_t op1, vint64m8_t op2,
   return vnmsub_vv_i64m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -444,7 +400,6 @@ vint64m8_t test_vnmsub_vx_i64m8(vint64m8_t acc, int64_t op1, vint64m8_t op2, siz
   return vnmsub_vx_i64m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -454,7 +409,6 @@ vuint8mf8_t test_vnmsub_vv_u8mf8(vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t o
   return vnmsub_vv_u8mf8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -464,7 +418,6 @@ vuint8mf8_t test_vnmsub_vx_u8mf8(vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2,
   return vnmsub_vx_u8mf8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -474,7 +427,6 @@ vuint8mf4_t test_vnmsub_vv_u8mf4(vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t o
   return vnmsub_vv_u8mf4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -484,7 +436,6 @@ vuint8mf4_t test_vnmsub_vx_u8mf4(vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2,
   return vnmsub_vx_u8mf4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -494,7 +445,6 @@ vuint8mf2_t test_vnmsub_vv_u8mf2(vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t o
   return vnmsub_vv_u8mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -504,7 +454,6 @@ vuint8mf2_t test_vnmsub_vx_u8mf2(vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2,
   return vnmsub_vx_u8mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -514,7 +463,6 @@ vuint8m1_t test_vnmsub_vv_u8m1(vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, s
   return vnmsub_vv_u8m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -524,7 +472,6 @@ vuint8m1_t test_vnmsub_vx_u8m1(vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size
   return vnmsub_vx_u8m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -534,7 +481,6 @@ vuint8m2_t test_vnmsub_vv_u8m2(vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, s
   return vnmsub_vv_u8m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -544,7 +490,6 @@ vuint8m2_t test_vnmsub_vx_u8m2(vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size
   return vnmsub_vx_u8m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -554,7 +499,6 @@ vuint8m4_t test_vnmsub_vv_u8m4(vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, s
   return vnmsub_vv_u8m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -564,7 +508,6 @@ vuint8m4_t test_vnmsub_vx_u8m4(vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size
   return vnmsub_vx_u8m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -574,7 +517,6 @@ vuint8m8_t test_vnmsub_vv_u8m8(vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, s
   return vnmsub_vv_u8m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -584,7 +526,6 @@ vuint8m8_t test_vnmsub_vx_u8m8(vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size
   return vnmsub_vx_u8m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -594,7 +535,6 @@ vuint16mf4_t test_vnmsub_vv_u16mf4(vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf
   return vnmsub_vv_u16mf4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -604,7 +544,6 @@ vuint16mf4_t test_vnmsub_vx_u16mf4(vuint16mf4_t acc, uint16_t op1, vuint16mf4_t
   return vnmsub_vx_u16mf4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -614,7 +553,6 @@ vuint16mf2_t test_vnmsub_vv_u16mf2(vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf
   return vnmsub_vv_u16mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -624,7 +562,6 @@ vuint16mf2_t test_vnmsub_vx_u16mf2(vuint16mf2_t acc, uint16_t op1, vuint16mf2_t
   return vnmsub_vx_u16mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -634,7 +571,6 @@ vuint16m1_t test_vnmsub_vv_u16m1(vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t o
   return vnmsub_vv_u16m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -644,7 +580,6 @@ vuint16m1_t test_vnmsub_vx_u16m1(vuint16m1_t acc, uint16_t op1, vuint16m1_t op2,
   return vnmsub_vx_u16m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -654,7 +589,6 @@ vuint16m2_t test_vnmsub_vv_u16m2(vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t o
   return vnmsub_vv_u16m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -664,7 +598,6 @@ vuint16m2_t test_vnmsub_vx_u16m2(vuint16m2_t acc, uint16_t op1, vuint16m2_t op2,
   return vnmsub_vx_u16m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -674,7 +607,6 @@ vuint16m4_t test_vnmsub_vv_u16m4(vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t o
   return vnmsub_vv_u16m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -684,7 +616,6 @@ vuint16m4_t test_vnmsub_vx_u16m4(vuint16m4_t acc, uint16_t op1, vuint16m4_t op2,
   return vnmsub_vx_u16m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -694,7 +625,6 @@ vuint16m8_t test_vnmsub_vv_u16m8(vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t o
   return vnmsub_vv_u16m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -704,7 +634,6 @@ vuint16m8_t test_vnmsub_vx_u16m8(vuint16m8_t acc, uint16_t op1, vuint16m8_t op2,
   return vnmsub_vx_u16m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -714,7 +643,6 @@ vuint32mf2_t test_vnmsub_vv_u32mf2(vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf
   return vnmsub_vv_u32mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -724,7 +652,6 @@ vuint32mf2_t test_vnmsub_vx_u32mf2(vuint32mf2_t acc, uint32_t op1, vuint32mf2_t
   return vnmsub_vx_u32mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -734,7 +661,6 @@ vuint32m1_t test_vnmsub_vv_u32m1(vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t o
   return vnmsub_vv_u32m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -744,7 +670,6 @@ vuint32m1_t test_vnmsub_vx_u32m1(vuint32m1_t acc, uint32_t op1, vuint32m1_t op2,
   return vnmsub_vx_u32m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -754,7 +679,6 @@ vuint32m2_t test_vnmsub_vv_u32m2(vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t o
   return vnmsub_vv_u32m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -764,7 +688,6 @@ vuint32m2_t test_vnmsub_vx_u32m2(vuint32m2_t acc, uint32_t op1, vuint32m2_t op2,
   return vnmsub_vx_u32m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -774,7 +697,6 @@ vuint32m4_t test_vnmsub_vv_u32m4(vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t o
   return vnmsub_vv_u32m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -784,7 +706,6 @@ vuint32m4_t test_vnmsub_vx_u32m4(vuint32m4_t acc, uint32_t op1, vuint32m4_t op2,
   return vnmsub_vx_u32m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -794,7 +715,6 @@ vuint32m8_t test_vnmsub_vv_u32m8(vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t o
   return vnmsub_vv_u32m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -804,7 +724,6 @@ vuint32m8_t test_vnmsub_vx_u32m8(vuint32m8_t acc, uint32_t op1, vuint32m8_t op2,
   return vnmsub_vx_u32m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -814,7 +733,6 @@ vuint64m1_t test_vnmsub_vv_u64m1(vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t o
   return vnmsub_vv_u64m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -824,7 +742,6 @@ vuint64m1_t test_vnmsub_vx_u64m1(vuint64m1_t acc, uint64_t op1, vuint64m1_t op2,
   return vnmsub_vx_u64m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -834,7 +751,6 @@ vuint64m2_t test_vnmsub_vv_u64m2(vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t o
   return vnmsub_vv_u64m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -844,7 +760,6 @@ vuint64m2_t test_vnmsub_vx_u64m2(vuint64m2_t acc, uint64_t op1, vuint64m2_t op2,
   return vnmsub_vx_u64m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -854,7 +769,6 @@ vuint64m4_t test_vnmsub_vv_u64m4(vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t o
   return vnmsub_vv_u64m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -864,7 +778,6 @@ vuint64m4_t test_vnmsub_vx_u64m4(vuint64m4_t acc, uint64_t op1, vuint64m4_t op2,
   return vnmsub_vx_u64m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -874,7 +787,6 @@ vuint64m8_t test_vnmsub_vv_u64m8(vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t o
   return vnmsub_vv_u64m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -884,7 +796,6 @@ vuint64m8_t test_vnmsub_vx_u64m8(vuint64m8_t acc, uint64_t op1, vuint64m8_t op2,
   return vnmsub_vx_u64m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -894,7 +805,6 @@ vint8mf8_t test_vnmsub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1
   return vnmsub_vv_i8mf8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -904,7 +814,6 @@ vint8mf8_t test_vnmsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vi
   return vnmsub_vx_i8mf8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -914,7 +823,6 @@ vint8mf4_t test_vnmsub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1
   return vnmsub_vv_i8mf4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -924,7 +832,6 @@ vint8mf4_t test_vnmsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vi
   return vnmsub_vx_i8mf4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -934,7 +841,6 @@ vint8mf2_t test_vnmsub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1
   return vnmsub_vv_i8mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -944,7 +850,6 @@ vint8mf2_t test_vnmsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vi
   return vnmsub_vx_i8mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -954,7 +859,6 @@ vint8m1_t test_vnmsub_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vin
   return vnmsub_vv_i8m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -964,7 +868,6 @@ vint8m1_t test_vnmsub_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m
   return vnmsub_vx_i8m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -974,7 +877,6 @@ vint8m2_t test_vnmsub_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vin
   return vnmsub_vv_i8m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -984,7 +886,6 @@ vint8m2_t test_vnmsub_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m
   return vnmsub_vx_i8m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -994,7 +895,6 @@ vint8m4_t test_vnmsub_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vin
   return vnmsub_vv_i8m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1004,7 +904,6 @@ vint8m4_t test_vnmsub_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m
   return vnmsub_vx_i8m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1014,7 +913,6 @@ vint8m8_t test_vnmsub_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vin
   return vnmsub_vv_i8m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1024,7 +922,6 @@ vint8m8_t test_vnmsub_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m
   return vnmsub_vx_i8m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1034,7 +931,6 @@ vint16mf4_t test_vnmsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t
   return vnmsub_vv_i16mf4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1044,7 +940,6 @@ vint16mf4_t test_vnmsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1
   return vnmsub_vx_i16mf4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1054,7 +949,6 @@ vint16mf2_t test_vnmsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t
   return vnmsub_vv_i16mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1064,7 +958,6 @@ vint16mf2_t test_vnmsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1
   return vnmsub_vx_i16mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1074,7 +967,6 @@ vint16m1_t test_vnmsub_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1
   return vnmsub_vv_i16m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1084,7 +976,6 @@ vint16m1_t test_vnmsub_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, v
   return vnmsub_vx_i16m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1094,7 +985,6 @@ vint16m2_t test_vnmsub_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1,
   return vnmsub_vv_i16m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1104,7 +994,6 @@ vint16m2_t test_vnmsub_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vi
   return vnmsub_vx_i16m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1114,7 +1003,6 @@ vint16m4_t test_vnmsub_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1,
   return vnmsub_vv_i16m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1124,7 +1012,6 @@ vint16m4_t test_vnmsub_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vi
   return vnmsub_vx_i16m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1134,7 +1021,6 @@ vint16m8_t test_vnmsub_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1,
   return vnmsub_vv_i16m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1144,7 +1030,6 @@ vint16m8_t test_vnmsub_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vi
   return vnmsub_vx_i16m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1154,7 +1039,6 @@ vint32mf2_t test_vnmsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t
   return vnmsub_vv_i32mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1164,7 +1048,6 @@ vint32mf2_t test_vnmsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1
   return vnmsub_vx_i32mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1174,7 +1057,6 @@ vint32m1_t test_vnmsub_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1
   return vnmsub_vv_i32m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1184,7 +1066,6 @@ vint32m1_t test_vnmsub_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, v
   return vnmsub_vx_i32m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1194,7 +1075,6 @@ vint32m2_t test_vnmsub_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1
   return vnmsub_vv_i32m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1204,7 +1084,6 @@ vint32m2_t test_vnmsub_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, v
   return vnmsub_vx_i32m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1214,7 +1093,6 @@ vint32m4_t test_vnmsub_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1,
   return vnmsub_vv_i32m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1224,7 +1102,6 @@ vint32m4_t test_vnmsub_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vi
   return vnmsub_vx_i32m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1234,7 +1111,6 @@ vint32m8_t test_vnmsub_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1,
   return vnmsub_vv_i32m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1244,7 +1120,6 @@ vint32m8_t test_vnmsub_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vi
   return vnmsub_vx_i32m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1254,7 +1129,6 @@ vint64m1_t test_vnmsub_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1
   return vnmsub_vv_i64m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1264,7 +1138,6 @@ vint64m1_t test_vnmsub_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, v
   return vnmsub_vx_i64m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1274,7 +1147,6 @@ vint64m2_t test_vnmsub_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1
   return vnmsub_vv_i64m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1284,7 +1156,6 @@ vint64m2_t test_vnmsub_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, v
   return vnmsub_vx_i64m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1294,7 +1165,6 @@ vint64m4_t test_vnmsub_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1
   return vnmsub_vv_i64m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1304,7 +1174,6 @@ vint64m4_t test_vnmsub_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, v
   return vnmsub_vx_i64m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1314,7 +1183,6 @@ vint64m8_t test_vnmsub_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1,
   return vnmsub_vv_i64m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1324,7 +1192,6 @@ vint64m8_t test_vnmsub_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vi
   return vnmsub_vx_i64m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1334,7 +1201,6 @@ vuint8mf8_t test_vnmsub_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t
   return vnmsub_vv_u8mf8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1344,7 +1210,6 @@ vuint8mf8_t test_vnmsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1,
   return vnmsub_vx_u8mf8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1354,7 +1219,6 @@ vuint8mf4_t test_vnmsub_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t
   return vnmsub_vv_u8mf4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1364,7 +1228,6 @@ vuint8mf4_t test_vnmsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1,
   return vnmsub_vx_u8mf4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1374,7 +1237,6 @@ vuint8mf2_t test_vnmsub_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t
   return vnmsub_vv_u8mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1384,7 +1246,6 @@ vuint8mf2_t test_vnmsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1,
   return vnmsub_vx_u8mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1394,7 +1255,6 @@ vuint8m1_t test_vnmsub_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1,
   return vnmsub_vv_u8m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1404,7 +1264,6 @@ vuint8m1_t test_vnmsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vui
   return vnmsub_vx_u8m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1414,7 +1273,6 @@ vuint8m2_t test_vnmsub_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1,
   return vnmsub_vv_u8m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1424,7 +1282,6 @@ vuint8m2_t test_vnmsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vui
   return vnmsub_vx_u8m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1434,7 +1291,6 @@ vuint8m4_t test_vnmsub_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1,
   return vnmsub_vv_u8m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1444,7 +1300,6 @@ vuint8m4_t test_vnmsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vui
   return vnmsub_vx_u8m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1454,7 +1309,6 @@ vuint8m8_t test_vnmsub_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1,
   return vnmsub_vv_u8m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1464,7 +1318,6 @@ vuint8m8_t test_vnmsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vui
   return vnmsub_vx_u8m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1474,7 +1327,6 @@ vuint16mf4_t test_vnmsub_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf
   return vnmsub_vv_u16mf4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1484,7 +1336,6 @@ vuint16mf4_t test_vnmsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t
   return vnmsub_vx_u16mf4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1494,7 +1345,6 @@ vuint16mf2_t test_vnmsub_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf
   return vnmsub_vv_u16mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1504,7 +1354,6 @@ vuint16mf2_t test_vnmsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t
   return vnmsub_vx_u16mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1514,7 +1363,6 @@ vuint16m1_t test_vnmsub_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t
   return vnmsub_vv_u16m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1524,7 +1372,6 @@ vuint16m1_t test_vnmsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1
   return vnmsub_vx_u16m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1534,7 +1381,6 @@ vuint16m2_t test_vnmsub_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t o
   return vnmsub_vv_u16m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1544,7 +1390,6 @@ vuint16m2_t test_vnmsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1,
   return vnmsub_vx_u16m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1554,7 +1399,6 @@ vuint16m4_t test_vnmsub_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t o
   return vnmsub_vv_u16m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1564,7 +1408,6 @@ vuint16m4_t test_vnmsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1,
   return vnmsub_vx_u16m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1574,7 +1417,6 @@ vuint16m8_t test_vnmsub_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t o
   return vnmsub_vv_u16m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1584,7 +1426,6 @@ vuint16m8_t test_vnmsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1,
   return vnmsub_vx_u16m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1594,7 +1435,6 @@ vuint32mf2_t test_vnmsub_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf
   return vnmsub_vv_u32mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1604,7 +1444,6 @@ vuint32mf2_t test_vnmsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t
   return vnmsub_vx_u32mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1614,7 +1453,6 @@ vuint32m1_t test_vnmsub_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t
   return vnmsub_vv_u32m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1624,7 +1462,6 @@ vuint32m1_t test_vnmsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1
   return vnmsub_vx_u32m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1634,7 +1471,6 @@ vuint32m2_t test_vnmsub_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t
   return vnmsub_vv_u32m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1644,7 +1480,6 @@ vuint32m2_t test_vnmsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1
   return vnmsub_vx_u32m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1654,7 +1489,6 @@ vuint32m4_t test_vnmsub_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t o
   return vnmsub_vv_u32m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1664,7 +1498,6 @@ vuint32m4_t test_vnmsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1,
   return vnmsub_vx_u32m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1674,7 +1507,6 @@ vuint32m8_t test_vnmsub_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t o
   return vnmsub_vv_u32m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1684,7 +1516,6 @@ vuint32m8_t test_vnmsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1,
   return vnmsub_vx_u32m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1694,7 +1525,6 @@ vuint64m1_t test_vnmsub_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t
   return vnmsub_vv_u64m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1704,7 +1534,6 @@ vuint64m1_t test_vnmsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1
   return vnmsub_vx_u64m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1714,7 +1543,6 @@ vuint64m2_t test_vnmsub_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t
   return vnmsub_vv_u64m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1724,7 +1552,6 @@ vuint64m2_t test_vnmsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1
   return vnmsub_vx_u64m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1734,7 +1561,6 @@ vuint64m4_t test_vnmsub_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t
   return vnmsub_vv_u64m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1744,7 +1570,6 @@ vuint64m4_t test_vnmsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1
   return vnmsub_vx_u64m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1754,7 +1579,6 @@ vuint64m8_t test_vnmsub_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t o
   return vnmsub_vv_u64m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnot.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnot.c
index 689486692a0fd..c45a2ff682104 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnot.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnot.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vint8mf8_t test_vnot_v_i8mf8 (vint8mf8_t op1, size_t vl) {
   return vnot_v_i8mf8(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
@@ -25,7 +23,6 @@ vint8mf4_t test_vnot_v_i8mf4 (vint8mf4_t op1, size_t vl) {
   return vnot_v_i8mf4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
@@ -35,7 +32,6 @@ vint8mf2_t test_vnot_v_i8mf2 (vint8mf2_t op1, size_t vl) {
   return vnot_v_i8mf2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
@@ -45,7 +41,6 @@ vint8m1_t test_vnot_v_i8m1 (vint8m1_t op1, size_t vl) {
   return vnot_v_i8m1(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
@@ -55,7 +50,6 @@ vint8m2_t test_vnot_v_i8m2 (vint8m2_t op1, size_t vl) {
   return vnot_v_i8m2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
@@ -65,7 +59,6 @@ vint8m4_t test_vnot_v_i8m4 (vint8m4_t op1, size_t vl) {
   return vnot_v_i8m4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
@@ -75,7 +68,6 @@ vint8m8_t test_vnot_v_i8m8 (vint8m8_t op1, size_t vl) {
   return vnot_v_i8m8(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
@@ -85,7 +77,6 @@ vint16mf4_t test_vnot_v_i16mf4 (vint16mf4_t op1, size_t vl) {
   return vnot_v_i16mf4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
@@ -95,7 +86,6 @@ vint16mf2_t test_vnot_v_i16mf2 (vint16mf2_t op1, size_t vl) {
   return vnot_v_i16mf2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
@@ -105,7 +95,6 @@ vint16m1_t test_vnot_v_i16m1 (vint16m1_t op1, size_t vl) {
   return vnot_v_i16m1(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
@@ -115,7 +104,6 @@ vint16m2_t test_vnot_v_i16m2 (vint16m2_t op1, size_t vl) {
   return vnot_v_i16m2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
@@ -125,7 +113,6 @@ vint16m4_t test_vnot_v_i16m4 (vint16m4_t op1, size_t vl) {
   return vnot_v_i16m4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
@@ -135,7 +122,6 @@ vint16m8_t test_vnot_v_i16m8 (vint16m8_t op1, size_t vl) {
   return vnot_v_i16m8(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
@@ -145,7 +131,6 @@ vint32mf2_t test_vnot_v_i32mf2 (vint32mf2_t op1, size_t vl) {
   return vnot_v_i32mf2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
@@ -155,7 +140,6 @@ vint32m1_t test_vnot_v_i32m1 (vint32m1_t op1, size_t vl) {
   return vnot_v_i32m1(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
@@ -165,7 +149,6 @@ vint32m2_t test_vnot_v_i32m2 (vint32m2_t op1, size_t vl) {
   return vnot_v_i32m2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
@@ -175,7 +158,6 @@ vint32m4_t test_vnot_v_i32m4 (vint32m4_t op1, size_t vl) {
   return vnot_v_i32m4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
@@ -185,7 +167,6 @@ vint32m8_t test_vnot_v_i32m8 (vint32m8_t op1, size_t vl) {
   return vnot_v_i32m8(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
@@ -195,7 +176,6 @@ vint64m1_t test_vnot_v_i64m1 (vint64m1_t op1, size_t vl) {
   return vnot_v_i64m1(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
@@ -205,7 +185,6 @@ vint64m2_t test_vnot_v_i64m2 (vint64m2_t op1, size_t vl) {
   return vnot_v_i64m2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
@@ -215,7 +194,6 @@ vint64m4_t test_vnot_v_i64m4 (vint64m4_t op1, size_t vl) {
   return vnot_v_i64m4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
@@ -225,7 +203,6 @@ vint64m8_t test_vnot_v_i64m8 (vint64m8_t op1, size_t vl) {
   return vnot_v_i64m8(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
@@ -235,7 +212,6 @@ vuint8mf8_t test_vnot_v_u8mf8 (vuint8mf8_t op1, size_t vl) {
   return vnot_v_u8mf8(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
@@ -245,7 +221,6 @@ vuint8mf4_t test_vnot_v_u8mf4 (vuint8mf4_t op1, size_t vl) {
   return vnot_v_u8mf4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
@@ -255,7 +230,6 @@ vuint8mf2_t test_vnot_v_u8mf2 (vuint8mf2_t op1, size_t vl) {
   return vnot_v_u8mf2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
@@ -265,7 +239,6 @@ vuint8m1_t test_vnot_v_u8m1 (vuint8m1_t op1, size_t vl) {
   return vnot_v_u8m1(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
@@ -275,7 +248,6 @@ vuint8m2_t test_vnot_v_u8m2 (vuint8m2_t op1, size_t vl) {
   return vnot_v_u8m2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
@@ -285,7 +257,6 @@ vuint8m4_t test_vnot_v_u8m4 (vuint8m4_t op1, size_t vl) {
   return vnot_v_u8m4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
@@ -295,7 +266,6 @@ vuint8m8_t test_vnot_v_u8m8 (vuint8m8_t op1, size_t vl) {
   return vnot_v_u8m8(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
@@ -305,7 +275,6 @@ vuint16mf4_t test_vnot_v_u16mf4 (vuint16mf4_t op1, size_t vl) {
   return vnot_v_u16mf4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
@@ -315,7 +284,6 @@ vuint16mf2_t test_vnot_v_u16mf2 (vuint16mf2_t op1, size_t vl) {
   return vnot_v_u16mf2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
@@ -325,7 +293,6 @@ vuint16m1_t test_vnot_v_u16m1 (vuint16m1_t op1, size_t vl) {
   return vnot_v_u16m1(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
@@ -335,7 +302,6 @@ vuint16m2_t test_vnot_v_u16m2 (vuint16m2_t op1, size_t vl) {
   return vnot_v_u16m2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
@@ -345,7 +311,6 @@ vuint16m4_t test_vnot_v_u16m4 (vuint16m4_t op1, size_t vl) {
   return vnot_v_u16m4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
@@ -355,7 +320,6 @@ vuint16m8_t test_vnot_v_u16m8 (vuint16m8_t op1, size_t vl) {
   return vnot_v_u16m8(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
@@ -365,7 +329,6 @@ vuint32mf2_t test_vnot_v_u32mf2 (vuint32mf2_t op1, size_t vl) {
   return vnot_v_u32mf2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
@@ -375,7 +338,6 @@ vuint32m1_t test_vnot_v_u32m1 (vuint32m1_t op1, size_t vl) {
   return vnot_v_u32m1(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
@@ -385,7 +347,6 @@ vuint32m2_t test_vnot_v_u32m2 (vuint32m2_t op1, size_t vl) {
   return vnot_v_u32m2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
@@ -395,7 +356,6 @@ vuint32m4_t test_vnot_v_u32m4 (vuint32m4_t op1, size_t vl) {
   return vnot_v_u32m4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
@@ -405,7 +365,6 @@ vuint32m8_t test_vnot_v_u32m8 (vuint32m8_t op1, size_t vl) {
   return vnot_v_u32m8(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
@@ -415,7 +374,6 @@ vuint64m1_t test_vnot_v_u64m1 (vuint64m1_t op1, size_t vl) {
   return vnot_v_u64m1(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
@@ -425,7 +383,6 @@ vuint64m2_t test_vnot_v_u64m2 (vuint64m2_t op1, size_t vl) {
   return vnot_v_u64m2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
@@ -435,7 +392,6 @@ vuint64m4_t test_vnot_v_u64m4 (vuint64m4_t op1, size_t vl) {
   return vnot_v_u64m4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
@@ -445,7 +401,6 @@ vuint64m8_t test_vnot_v_u64m8 (vuint64m8_t op1, size_t vl) {
   return vnot_v_u64m8(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 -1, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -455,7 +410,6 @@ vint8mf8_t test_vnot_v_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t
   return vnot_v_i8mf8_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 -1, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -465,7 +419,6 @@ vint8mf4_t test_vnot_v_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t
   return vnot_v_i8mf4_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 -1, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -475,7 +428,6 @@ vint8mf2_t test_vnot_v_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t
   return vnot_v_i8mf2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 -1, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -485,7 +437,6 @@ vint8m1_t test_vnot_v_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1,
   return vnot_v_i8m1_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 -1, <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -495,7 +446,6 @@ vint8m2_t test_vnot_v_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1,
   return vnot_v_i8m2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 -1, <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -505,7 +455,6 @@ vint8m4_t test_vnot_v_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1,
   return vnot_v_i8m4_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 -1, <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -515,7 +464,6 @@ vint8m8_t test_vnot_v_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1,
   return vnot_v_i8m8_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 -1, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -525,7 +473,6 @@ vint16mf4_t test_vnot_v_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16m
   return vnot_v_i16mf4_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 -1, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -535,7 +482,6 @@ vint16mf2_t test_vnot_v_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16m
   return vnot_v_i16mf2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 -1, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -545,7 +491,6 @@ vint16m1_t test_vnot_v_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t
   return vnot_v_i16m1_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 -1, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -555,7 +500,6 @@ vint16m2_t test_vnot_v_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
   return vnot_v_i16m2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 -1, <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -565,7 +509,6 @@ vint16m4_t test_vnot_v_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
   return vnot_v_i16m4_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 -1, <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -575,7 +518,6 @@ vint16m8_t test_vnot_v_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
   return vnot_v_i16m8_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 -1, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -585,7 +527,6 @@ vint32mf2_t test_vnot_v_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32m
   return vnot_v_i32mf2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 -1, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -595,7 +536,6 @@ vint32m1_t test_vnot_v_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t
   return vnot_v_i32m1_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 -1, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -605,7 +545,6 @@ vint32m2_t test_vnot_v_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t
   return vnot_v_i32m2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 -1, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -615,7 +554,6 @@ vint32m4_t test_vnot_v_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
   return vnot_v_i32m4_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 -1, <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -625,7 +563,6 @@ vint32m8_t test_vnot_v_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
   return vnot_v_i32m8_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 -1, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -635,7 +572,6 @@ vint64m1_t test_vnot_v_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t
   return vnot_v_i64m1_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 -1, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -645,7 +581,6 @@ vint64m2_t test_vnot_v_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t
   return vnot_v_i64m2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 -1, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -655,7 +590,6 @@ vint64m4_t test_vnot_v_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t
   return vnot_v_i64m4_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 -1, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -665,7 +599,6 @@ vint64m8_t test_vnot_v_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
   return vnot_v_i64m8_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 -1, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -675,7 +608,6 @@ vuint8mf8_t test_vnot_v_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf
   return vnot_v_u8mf8_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 -1, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -685,7 +617,6 @@ vuint8mf4_t test_vnot_v_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf
   return vnot_v_u8mf4_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 -1, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -695,7 +626,6 @@ vuint8mf2_t test_vnot_v_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf
   return vnot_v_u8mf2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 -1, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -705,7 +635,6 @@ vuint8m1_t test_vnot_v_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t o
   return vnot_v_u8m1_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 -1, <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -715,7 +644,6 @@ vuint8m2_t test_vnot_v_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t o
   return vnot_v_u8m2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 -1, <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -725,7 +653,6 @@ vuint8m4_t test_vnot_v_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t o
   return vnot_v_u8m4_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 -1, <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -735,7 +662,6 @@ vuint8m8_t test_vnot_v_u8m8_m (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t o
   return vnot_v_u8m8_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 -1, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -745,7 +671,6 @@ vuint16mf4_t test_vnot_v_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint
   return vnot_v_u16mf4_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 -1, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -755,7 +680,6 @@ vuint16mf2_t test_vnot_v_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint
   return vnot_v_u16mf2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 -1, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -765,7 +689,6 @@ vuint16m1_t test_vnot_v_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m
   return vnot_v_u16m1_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 -1, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -775,7 +698,6 @@ vuint16m2_t test_vnot_v_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2
   return vnot_v_u16m2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 -1, <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -785,7 +707,6 @@ vuint16m4_t test_vnot_v_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4
   return vnot_v_u16m4_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 -1, <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -795,7 +716,6 @@ vuint16m8_t test_vnot_v_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8
   return vnot_v_u16m8_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 -1, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -805,7 +725,6 @@ vuint32mf2_t test_vnot_v_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint
   return vnot_v_u32mf2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 -1, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -815,7 +734,6 @@ vuint32m1_t test_vnot_v_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m
   return vnot_v_u32m1_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 -1, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -825,7 +743,6 @@ vuint32m2_t test_vnot_v_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint32m
   return vnot_v_u32m2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 -1, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -835,7 +752,6 @@ vuint32m4_t test_vnot_v_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4
   return vnot_v_u32m4_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 -1, <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -845,7 +761,6 @@ vuint32m8_t test_vnot_v_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8
   return vnot_v_u32m8_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 -1, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -855,7 +770,6 @@ vuint64m1_t test_vnot_v_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m
   return vnot_v_u64m1_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 -1, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -865,7 +779,6 @@ vuint64m2_t test_vnot_v_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint64m
   return vnot_v_u64m2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 -1, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -875,7 +788,6 @@ vuint64m4_t test_vnot_v_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint64m
   return vnot_v_u64m4_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 -1, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsra.c
index 013b863bf4ba0..21ca9ab1154b7 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsra.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsra.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint8mf8_t test_vnsra_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
   return vnsra_wv_i8mf8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint8mf8_t test_vnsra_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) {
   return vnsra_wx_i8mf8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint8mf4_t test_vnsra_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
   return vnsra_wv_i8mf4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint8mf4_t test_vnsra_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) {
   return vnsra_wx_i8mf4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint8mf2_t test_vnsra_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
   return vnsra_wv_i8mf2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint8mf2_t test_vnsra_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) {
   return vnsra_wx_i8mf2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wv_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vint8m1_t test_vnsra_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) {
   return vnsra_wv_i8m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vint8m1_t test_vnsra_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) {
   return vnsra_wx_i8m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wv_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vint8m2_t test_vnsra_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) {
   return vnsra_wv_i8m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vint8m2_t test_vnsra_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) {
   return vnsra_wx_i8m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wv_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vint8m4_t test_vnsra_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) {
   return vnsra_wv_i8m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vint8m4_t test_vnsra_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) {
   return vnsra_wx_i8m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vint16mf4_t test_vnsra_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift, size_t vl)
   return vnsra_wv_i16mf4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vint16mf4_t test_vnsra_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) {
   return vnsra_wx_i16mf4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vint16mf2_t test_vnsra_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift, size_t vl)
   return vnsra_wv_i16mf2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vint16mf2_t test_vnsra_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) {
   return vnsra_wx_i16mf2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vint16m1_t test_vnsra_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) {
   return vnsra_wv_i16m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vint16m1_t test_vnsra_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) {
   return vnsra_wx_i16m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vint16m2_t test_vnsra_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) {
   return vnsra_wv_i16m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vint16m2_t test_vnsra_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) {
   return vnsra_wx_i16m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vint16m4_t test_vnsra_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) {
   return vnsra_wv_i16m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vint16m4_t test_vnsra_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) {
   return vnsra_wx_i16m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vint32mf2_t test_vnsra_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift, size_t vl)
   return vnsra_wv_i32mf2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vint32mf2_t test_vnsra_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) {
   return vnsra_wx_i32mf2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vint32m1_t test_vnsra_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) {
   return vnsra_wv_i32m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vint32m1_t test_vnsra_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) {
   return vnsra_wx_i32m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vint32m2_t test_vnsra_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) {
   return vnsra_wv_i32m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vint32m2_t test_vnsra_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) {
   return vnsra_wx_i32m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vint32m4_t test_vnsra_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) {
   return vnsra_wv_i32m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -304,7 +274,6 @@ vint32m4_t test_vnsra_wx_i32m4(vint64m8_t op1, size_t shift, size_t vl) {
   return vnsra_wx_i32m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -314,7 +283,6 @@ vint8mf8_t test_vnsra_wv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4
   return vnsra_wv_i8mf8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -324,7 +292,6 @@ vint8mf8_t test_vnsra_wx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4
   return vnsra_wx_i8mf8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -334,7 +301,6 @@ vint8mf4_t test_vnsra_wv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2
   return vnsra_wv_i8mf4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -344,7 +310,6 @@ vint8mf4_t test_vnsra_wx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2
   return vnsra_wx_i8mf4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -354,7 +319,6 @@ vint8mf2_t test_vnsra_wv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_
   return vnsra_wv_i8mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -364,7 +328,6 @@ vint8mf2_t test_vnsra_wx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_
   return vnsra_wx_i8mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wv_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -374,7 +337,6 @@ vint8m1_t test_vnsra_wv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op
   return vnsra_wv_i8m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -384,7 +346,6 @@ vint8m1_t test_vnsra_wx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op
   return vnsra_wx_i8m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wv_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -394,7 +355,6 @@ vint8m2_t test_vnsra_wv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op
   return vnsra_wv_i8m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -404,7 +364,6 @@ vint8m2_t test_vnsra_wx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op
   return vnsra_wx_i8m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wv_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -414,7 +373,6 @@ vint8m4_t test_vnsra_wv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op
   return vnsra_wv_i8m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -424,7 +382,6 @@ vint8m4_t test_vnsra_wx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op
   return vnsra_wx_i8m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -434,7 +391,6 @@ vint16mf4_t test_vnsra_wv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint32
   return vnsra_wv_i16mf4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -444,7 +400,6 @@ vint16mf4_t test_vnsra_wx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint32
   return vnsra_wx_i16mf4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -454,7 +409,6 @@ vint16mf2_t test_vnsra_wv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint32
   return vnsra_wv_i16mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -464,7 +418,6 @@ vint16mf2_t test_vnsra_wx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint32
   return vnsra_wx_i16mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -474,7 +427,6 @@ vint16m1_t test_vnsra_wv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint32m2_
   return vnsra_wv_i16m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -484,7 +436,6 @@ vint16m1_t test_vnsra_wx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint32m2_
   return vnsra_wx_i16m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -494,7 +445,6 @@ vint16m2_t test_vnsra_wv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t
   return vnsra_wv_i16m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -504,7 +454,6 @@ vint16m2_t test_vnsra_wx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t
   return vnsra_wx_i16m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -514,7 +463,6 @@ vint16m4_t test_vnsra_wv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t
   return vnsra_wv_i16m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -524,7 +472,6 @@ vint16m4_t test_vnsra_wx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t
   return vnsra_wx_i16m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -534,7 +481,6 @@ vint32mf2_t test_vnsra_wv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint64
   return vnsra_wv_i32mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -544,7 +490,6 @@ vint32mf2_t test_vnsra_wx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint64
   return vnsra_wx_i32mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -554,7 +499,6 @@ vint32m1_t test_vnsra_wv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint64m2_
   return vnsra_wv_i32m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -564,7 +508,6 @@ vint32m1_t test_vnsra_wx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint64m2_
   return vnsra_wx_i32m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -574,7 +517,6 @@ vint32m2_t test_vnsra_wv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint64m4_
   return vnsra_wv_i32m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -584,7 +526,6 @@ vint32m2_t test_vnsra_wx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint64m4_
   return vnsra_wx_i32m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -594,7 +535,6 @@ vint32m4_t test_vnsra_wv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t
   return vnsra_wv_i32m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsrl.c
index 557ca5edfb530..27643f49c807f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsrl.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsrl.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vuint8mf8_t test_vnsrl_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift, size_t vl)
   return vnsrl_wv_u8mf8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vuint8mf8_t test_vnsrl_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) {
   return vnsrl_wx_u8mf8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vuint8mf4_t test_vnsrl_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift, size_t vl)
   return vnsrl_wv_u8mf4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vuint8mf4_t test_vnsrl_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) {
   return vnsrl_wx_u8mf4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vuint8mf2_t test_vnsrl_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift, size_t vl) {
   return vnsrl_wv_u8mf2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vuint8mf2_t test_vnsrl_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) {
   return vnsrl_wx_u8mf2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vuint8m1_t test_vnsrl_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
   return vnsrl_wv_u8m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vuint8m1_t test_vnsrl_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) {
   return vnsrl_wx_u8m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vuint8m2_t test_vnsrl_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
   return vnsrl_wv_u8m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vuint8m2_t test_vnsrl_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) {
   return vnsrl_wx_u8m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vuint8m4_t test_vnsrl_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
   return vnsrl_wv_u8m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vuint8m4_t test_vnsrl_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) {
   return vnsrl_wx_u8m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vuint16mf4_t test_vnsrl_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift, size_t v
   return vnsrl_wv_u16mf4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vuint16mf4_t test_vnsrl_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) {
   return vnsrl_wx_u16mf4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vuint16mf2_t test_vnsrl_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift, size_t vl
   return vnsrl_wv_u16mf2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vuint16mf2_t test_vnsrl_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) {
   return vnsrl_wx_u16mf2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vuint16m1_t test_vnsrl_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift, size_t vl) {
   return vnsrl_wv_u16m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vuint16m1_t test_vnsrl_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) {
   return vnsrl_wx_u16m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vuint16m2_t test_vnsrl_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift, size_t vl) {
   return vnsrl_wv_u16m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vuint16m2_t test_vnsrl_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) {
   return vnsrl_wx_u16m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vuint16m4_t test_vnsrl_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift, size_t vl) {
   return vnsrl_wv_u16m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vuint16m4_t test_vnsrl_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) {
   return vnsrl_wx_u16m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vuint32mf2_t test_vnsrl_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift, size_t vl
   return vnsrl_wv_u32mf2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vuint32mf2_t test_vnsrl_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) {
   return vnsrl_wx_u32mf2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vuint32m1_t test_vnsrl_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
   return vnsrl_wv_u32m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vuint32m1_t test_vnsrl_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) {
   return vnsrl_wx_u32m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vuint32m2_t test_vnsrl_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift, size_t vl) {
   return vnsrl_wv_u32m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vuint32m2_t test_vnsrl_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) {
   return vnsrl_wx_u32m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vuint32m4_t test_vnsrl_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift, size_t vl) {
   return vnsrl_wv_u32m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -304,7 +274,6 @@ vuint32m4_t test_vnsrl_wx_u32m4(vuint64m8_t op1, size_t shift, size_t vl) {
   return vnsrl_wx_u32m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -314,7 +283,6 @@ vuint8mf8_t test_vnsrl_wv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint16
   return vnsrl_wv_u8mf8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -324,7 +292,6 @@ vuint8mf8_t test_vnsrl_wx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint16
   return vnsrl_wx_u8mf8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -334,7 +301,6 @@ vuint8mf4_t test_vnsrl_wv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint16
   return vnsrl_wv_u8mf4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -344,7 +310,6 @@ vuint8mf4_t test_vnsrl_wx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint16
   return vnsrl_wx_u8mf4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -354,7 +319,6 @@ vuint8mf2_t test_vnsrl_wv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint16
   return vnsrl_wv_u8mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -364,7 +328,6 @@ vuint8mf2_t test_vnsrl_wx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint16
   return vnsrl_wx_u8mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -374,7 +337,6 @@ vuint8m1_t test_vnsrl_wv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t
   return vnsrl_wv_u8m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -384,7 +346,6 @@ vuint8m1_t test_vnsrl_wx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t
   return vnsrl_wx_u8m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -394,7 +355,6 @@ vuint8m2_t test_vnsrl_wv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t
   return vnsrl_wv_u8m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -404,7 +364,6 @@ vuint8m2_t test_vnsrl_wx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t
   return vnsrl_wx_u8m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -414,7 +373,6 @@ vuint8m4_t test_vnsrl_wv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t
   return vnsrl_wv_u8m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -424,7 +382,6 @@ vuint8m4_t test_vnsrl_wx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t
   return vnsrl_wx_u8m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -434,7 +391,6 @@ vuint16mf4_t test_vnsrl_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuin
   return vnsrl_wv_u16mf4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -444,7 +400,6 @@ vuint16mf4_t test_vnsrl_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuin
   return vnsrl_wx_u16mf4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -454,7 +409,6 @@ vuint16mf2_t test_vnsrl_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuin
   return vnsrl_wv_u16mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -464,7 +418,6 @@ vuint16mf2_t test_vnsrl_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuin
   return vnsrl_wx_u16mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -474,7 +427,6 @@ vuint16m1_t test_vnsrl_wv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint32
   return vnsrl_wv_u16m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -484,7 +436,6 @@ vuint16m1_t test_vnsrl_wx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint32
   return vnsrl_wx_u16m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -494,7 +445,6 @@ vuint16m2_t test_vnsrl_wv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint32m
   return vnsrl_wv_u16m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -504,7 +454,6 @@ vuint16m2_t test_vnsrl_wx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint32m
   return vnsrl_wx_u16m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -514,7 +463,6 @@ vuint16m4_t test_vnsrl_wv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint32m
   return vnsrl_wv_u16m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -524,7 +472,6 @@ vuint16m4_t test_vnsrl_wx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint32m
   return vnsrl_wx_u16m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -534,7 +481,6 @@ vuint32mf2_t test_vnsrl_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuin
   return vnsrl_wv_u32mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -544,7 +490,6 @@ vuint32mf2_t test_vnsrl_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuin
   return vnsrl_wx_u32mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -554,7 +499,6 @@ vuint32m1_t test_vnsrl_wv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint64
   return vnsrl_wv_u32m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -564,7 +508,6 @@ vuint32m1_t test_vnsrl_wx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint64
   return vnsrl_wx_u32m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -574,7 +517,6 @@ vuint32m2_t test_vnsrl_wv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint64
   return vnsrl_wv_u32m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -584,7 +526,6 @@ vuint32m2_t test_vnsrl_wx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint64
   return vnsrl_wx_u32m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -594,7 +535,6 @@ vuint32m4_t test_vnsrl_wv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint64m
   return vnsrl_wv_u32m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

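For reference, every test touched by these hunks has the same shape: a thin wrapper function that forwards its arguments to a single RVV intrinsic, preceded by auto-generated CHECK lines, and the deleted standalone "//" lines carried no CHECK content. A minimal sketch of one such test from vor.c, assembled from the hunk context below (the remaining auto-generated CHECK lines are elided here):

#include <riscv_vector.h>

// CHECK-RV64-LABEL: @test_vor_vv_i8mf8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// (remaining auto-generated CHECK lines elided)
vint8mf8_t test_vor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
  // Thin wrapper: the test only exercises the vor_vv_i8mf8 intrinsic.
  return vor_vv_i8mf8(op1, op2, vl);
}
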
diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vor.c
index cc885cf32317d..34fe0fa06a0c2 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vor.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint8mf8_t test_vor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
   return vor_vv_i8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint8mf8_t test_vor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
   return vor_vx_i8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint8mf4_t test_vor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
   return vor_vv_i8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint8mf4_t test_vor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
   return vor_vx_i8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint8mf2_t test_vor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
   return vor_vv_i8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint8mf2_t test_vor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
   return vor_vx_i8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vint8m1_t test_vor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
   return vor_vv_i8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vint8m1_t test_vor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
   return vor_vx_i8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vint8m2_t test_vor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
   return vor_vv_i8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vint8m2_t test_vor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
   return vor_vx_i8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vint8m4_t test_vor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
   return vor_vv_i8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vint8m4_t test_vor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
   return vor_vx_i8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vint8m8_t test_vor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
   return vor_vv_i8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vint8m8_t test_vor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
   return vor_vx_i8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vint16mf4_t test_vor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
   return vor_vv_i16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vint16mf4_t test_vor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
   return vor_vx_i16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vint16mf2_t test_vor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
   return vor_vv_i16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vint16mf2_t test_vor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
   return vor_vx_i16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vint16m1_t test_vor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
   return vor_vv_i16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vint16m1_t test_vor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
   return vor_vx_i16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vint16m2_t test_vor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
   return vor_vv_i16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vint16m2_t test_vor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
   return vor_vx_i16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vint16m4_t test_vor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
   return vor_vv_i16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vint16m4_t test_vor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
   return vor_vx_i16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vint16m8_t test_vor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
   return vor_vv_i16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vint16m8_t test_vor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
   return vor_vx_i16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vint32mf2_t test_vor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
   return vor_vv_i32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vint32mf2_t test_vor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
   return vor_vx_i32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vint32m1_t test_vor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
   return vor_vv_i32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -304,7 +274,6 @@ vint32m1_t test_vor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
   return vor_vx_i32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -314,7 +283,6 @@ vint32m2_t test_vor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
   return vor_vv_i32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -324,7 +292,6 @@ vint32m2_t test_vor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
   return vor_vx_i32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -334,7 +301,6 @@ vint32m4_t test_vor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
   return vor_vv_i32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -344,7 +310,6 @@ vint32m4_t test_vor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
   return vor_vx_i32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -354,7 +319,6 @@ vint32m8_t test_vor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
   return vor_vv_i32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -364,7 +328,6 @@ vint32m8_t test_vor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
   return vor_vx_i32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -374,7 +337,6 @@ vint64m1_t test_vor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
   return vor_vv_i64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -384,7 +346,6 @@ vint64m1_t test_vor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
   return vor_vx_i64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -394,7 +355,6 @@ vint64m2_t test_vor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
   return vor_vv_i64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -404,7 +364,6 @@ vint64m2_t test_vor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
   return vor_vx_i64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -414,7 +373,6 @@ vint64m4_t test_vor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
   return vor_vv_i64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -424,7 +382,6 @@ vint64m4_t test_vor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
   return vor_vx_i64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -434,7 +391,6 @@ vint64m8_t test_vor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
   return vor_vv_i64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -444,7 +400,6 @@ vint64m8_t test_vor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
   return vor_vx_i64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -454,7 +409,6 @@ vuint8mf8_t test_vor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
   return vor_vv_u8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -464,7 +418,6 @@ vuint8mf8_t test_vor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
   return vor_vx_u8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -474,7 +427,6 @@ vuint8mf4_t test_vor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
   return vor_vv_u8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -484,7 +436,6 @@ vuint8mf4_t test_vor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
   return vor_vx_u8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -494,7 +445,6 @@ vuint8mf2_t test_vor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
   return vor_vv_u8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -504,7 +454,6 @@ vuint8mf2_t test_vor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
   return vor_vx_u8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -514,7 +463,6 @@ vuint8m1_t test_vor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vor_vv_u8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -524,7 +472,6 @@ vuint8m1_t test_vor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
   return vor_vx_u8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -534,7 +481,6 @@ vuint8m2_t test_vor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vor_vv_u8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -544,7 +490,6 @@ vuint8m2_t test_vor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
   return vor_vx_u8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -554,7 +499,6 @@ vuint8m4_t test_vor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vor_vv_u8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -564,7 +508,6 @@ vuint8m4_t test_vor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
   return vor_vx_u8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -574,7 +517,6 @@ vuint8m8_t test_vor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
   return vor_vv_u8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -584,7 +526,6 @@ vuint8m8_t test_vor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
   return vor_vx_u8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -594,7 +535,6 @@ vuint16mf4_t test_vor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
   return vor_vv_u16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -604,7 +544,6 @@ vuint16mf4_t test_vor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
   return vor_vx_u16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -614,7 +553,6 @@ vuint16mf2_t test_vor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
   return vor_vv_u16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -624,7 +562,6 @@ vuint16mf2_t test_vor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
   return vor_vx_u16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -634,7 +571,6 @@ vuint16m1_t test_vor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
   return vor_vv_u16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -644,7 +580,6 @@ vuint16m1_t test_vor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
   return vor_vx_u16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -654,7 +589,6 @@ vuint16m2_t test_vor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vor_vv_u16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -664,7 +598,6 @@ vuint16m2_t test_vor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
   return vor_vx_u16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -674,7 +607,6 @@ vuint16m4_t test_vor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vor_vv_u16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -684,7 +616,6 @@ vuint16m4_t test_vor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
   return vor_vx_u16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -694,7 +625,6 @@ vuint16m8_t test_vor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return vor_vv_u16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -704,7 +634,6 @@ vuint16m8_t test_vor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
   return vor_vx_u16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -714,7 +643,6 @@ vuint32mf2_t test_vor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
   return vor_vv_u32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -724,7 +652,6 @@ vuint32mf2_t test_vor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
   return vor_vx_u32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -734,7 +661,6 @@ vuint32m1_t test_vor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
   return vor_vv_u32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -744,7 +670,6 @@ vuint32m1_t test_vor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
   return vor_vx_u32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -754,7 +679,6 @@ vuint32m2_t test_vor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
   return vor_vv_u32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -764,7 +688,6 @@ vuint32m2_t test_vor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vor_vx_u32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -774,7 +697,6 @@ vuint32m4_t test_vor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vor_vv_u32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -784,7 +706,6 @@ vuint32m4_t test_vor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vor_vx_u32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -794,7 +715,6 @@ vuint32m8_t test_vor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
   return vor_vv_u32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -804,7 +724,6 @@ vuint32m8_t test_vor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
   return vor_vx_u32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -814,7 +733,6 @@ vuint64m1_t test_vor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
   return vor_vv_u64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -824,7 +742,6 @@ vuint64m1_t test_vor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
   return vor_vx_u64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -834,7 +751,6 @@ vuint64m2_t test_vor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
   return vor_vv_u64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -844,7 +760,6 @@ vuint64m2_t test_vor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
   return vor_vx_u64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -854,7 +769,6 @@ vuint64m4_t test_vor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
   return vor_vv_u64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -864,7 +778,6 @@ vuint64m4_t test_vor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
   return vor_vx_u64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -874,7 +787,6 @@ vuint64m8_t test_vor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
   return vor_vv_u64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -884,7 +796,6 @@ vuint64m8_t test_vor_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
   return vor_vx_u64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -894,7 +805,6 @@ vint8mf8_t test_vor_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t
   return vor_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -904,7 +814,6 @@ vint8mf8_t test_vor_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t
   return vor_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -914,7 +823,6 @@ vint8mf4_t test_vor_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t
   return vor_vv_i8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -924,7 +832,6 @@ vint8mf4_t test_vor_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t
   return vor_vx_i8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -934,7 +841,6 @@ vint8mf2_t test_vor_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t
   return vor_vv_i8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -944,7 +850,6 @@ vint8mf2_t test_vor_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t
   return vor_vx_i8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -954,7 +859,6 @@ vint8m1_t test_vor_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1,
   return vor_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -964,7 +868,6 @@ vint8m1_t test_vor_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1,
   return vor_vx_i8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -974,7 +877,6 @@ vint8m2_t test_vor_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1,
   return vor_vv_i8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -984,7 +886,6 @@ vint8m2_t test_vor_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1,
   return vor_vx_i8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -994,7 +895,6 @@ vint8m4_t test_vor_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1,
   return vor_vv_i8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1004,7 +904,6 @@ vint8m4_t test_vor_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1,
   return vor_vx_i8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1014,7 +913,6 @@ vint8m8_t test_vor_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1,
   return vor_vv_i8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1024,7 +922,6 @@ vint8m8_t test_vor_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1,
   return vor_vx_i8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1034,7 +931,6 @@ vint16mf4_t test_vor_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf
   return vor_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1044,7 +940,6 @@ vint16mf4_t test_vor_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf
   return vor_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1054,7 +949,6 @@ vint16mf2_t test_vor_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf
   return vor_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1064,7 +958,6 @@ vint16mf2_t test_vor_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf
   return vor_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1074,7 +967,6 @@ vint16m1_t test_vor_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t
   return vor_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1084,7 +976,6 @@ vint16m1_t test_vor_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t
   return vor_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1094,7 +985,6 @@ vint16m2_t test_vor_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t o
   return vor_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1104,7 +994,6 @@ vint16m2_t test_vor_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t o
   return vor_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1114,7 +1003,6 @@ vint16m4_t test_vor_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t o
   return vor_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1124,7 +1012,6 @@ vint16m4_t test_vor_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t o
   return vor_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1134,7 +1021,6 @@ vint16m8_t test_vor_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t o
   return vor_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1144,7 +1030,6 @@ vint16m8_t test_vor_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t o
   return vor_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1154,7 +1039,6 @@ vint32mf2_t test_vor_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf
   return vor_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1164,7 +1048,6 @@ vint32mf2_t test_vor_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf
   return vor_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1174,7 +1057,6 @@ vint32m1_t test_vor_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t
   return vor_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1184,7 +1066,6 @@ vint32m1_t test_vor_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t
   return vor_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1194,7 +1075,6 @@ vint32m2_t test_vor_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t
   return vor_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1204,7 +1084,6 @@ vint32m2_t test_vor_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t
   return vor_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1214,7 +1093,6 @@ vint32m4_t test_vor_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t o
   return vor_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1224,7 +1102,6 @@ vint32m4_t test_vor_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t o
   return vor_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1234,7 +1111,6 @@ vint32m8_t test_vor_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t o
   return vor_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1244,7 +1120,6 @@ vint32m8_t test_vor_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t o
   return vor_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1254,7 +1129,6 @@ vint64m1_t test_vor_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t
   return vor_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1264,7 +1138,6 @@ vint64m1_t test_vor_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t
   return vor_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1274,7 +1147,6 @@ vint64m2_t test_vor_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t
   return vor_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1284,7 +1156,6 @@ vint64m2_t test_vor_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t
   return vor_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1294,7 +1165,6 @@ vint64m4_t test_vor_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t
   return vor_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1304,7 +1174,6 @@ vint64m4_t test_vor_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t
   return vor_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1314,7 +1183,6 @@ vint64m8_t test_vor_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t o
   return vor_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1324,7 +1192,6 @@ vint64m8_t test_vor_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t o
   return vor_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1334,7 +1201,6 @@ vuint8mf8_t test_vor_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8
   return vor_vv_u8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1344,7 +1210,6 @@ vuint8mf8_t test_vor_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8
   return vor_vx_u8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1354,7 +1219,6 @@ vuint8mf4_t test_vor_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4
   return vor_vv_u8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1364,7 +1228,6 @@ vuint8mf4_t test_vor_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4
   return vor_vx_u8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1374,7 +1237,6 @@ vuint8mf2_t test_vor_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2
   return vor_vv_u8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1384,7 +1246,6 @@ vuint8mf2_t test_vor_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2
   return vor_vx_u8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1394,7 +1255,6 @@ vuint8m1_t test_vor_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op
   return vor_vv_u8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1404,7 +1264,6 @@ vuint8m1_t test_vor_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op
   return vor_vx_u8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1414,7 +1273,6 @@ vuint8m2_t test_vor_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op
   return vor_vv_u8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1424,7 +1282,6 @@ vuint8m2_t test_vor_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op
   return vor_vx_u8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1434,7 +1291,6 @@ vuint8m4_t test_vor_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op
   return vor_vv_u8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1444,7 +1300,6 @@ vuint8m4_t test_vor_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op
   return vor_vx_u8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1454,7 +1309,6 @@ vuint8m8_t test_vor_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op
   return vor_vv_u8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1464,7 +1318,6 @@ vuint8m8_t test_vor_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op
   return vor_vx_u8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1474,7 +1327,6 @@ vuint16mf4_t test_vor_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint1
   return vor_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1484,7 +1336,6 @@ vuint16mf4_t test_vor_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint1
   return vor_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1494,7 +1345,6 @@ vuint16mf2_t test_vor_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint1
   return vor_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1504,7 +1354,6 @@ vuint16mf2_t test_vor_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint1
   return vor_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1514,7 +1363,6 @@ vuint16m1_t test_vor_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1
   return vor_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1524,7 +1372,6 @@ vuint16m1_t test_vor_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1
   return vor_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1534,7 +1381,6 @@ vuint16m2_t test_vor_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_
   return vor_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1544,7 +1390,6 @@ vuint16m2_t test_vor_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_
   return vor_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1554,7 +1399,6 @@ vuint16m4_t test_vor_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_
   return vor_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1564,7 +1408,6 @@ vuint16m4_t test_vor_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_
   return vor_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1574,7 +1417,6 @@ vuint16m8_t test_vor_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_
   return vor_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1584,7 +1426,6 @@ vuint16m8_t test_vor_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_
   return vor_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1594,7 +1435,6 @@ vuint32mf2_t test_vor_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint3
   return vor_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1604,7 +1444,6 @@ vuint32mf2_t test_vor_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint3
   return vor_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1614,7 +1453,6 @@ vuint32m1_t test_vor_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1
   return vor_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1624,7 +1462,6 @@ vuint32m1_t test_vor_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1
   return vor_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1634,7 +1471,6 @@ vuint32m2_t test_vor_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2
   return vor_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1644,7 +1480,6 @@ vuint32m2_t test_vor_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2
   return vor_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1654,7 +1489,6 @@ vuint32m4_t test_vor_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_
   return vor_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1664,7 +1498,6 @@ vuint32m4_t test_vor_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_
   return vor_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1674,7 +1507,6 @@ vuint32m8_t test_vor_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_
   return vor_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1684,7 +1516,6 @@ vuint32m8_t test_vor_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_
   return vor_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1694,7 +1525,6 @@ vuint64m1_t test_vor_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1
   return vor_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1704,7 +1534,6 @@ vuint64m1_t test_vor_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1
   return vor_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1714,7 +1543,6 @@ vuint64m2_t test_vor_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2
   return vor_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1724,7 +1552,6 @@ vuint64m2_t test_vor_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2
   return vor_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1734,7 +1561,6 @@ vuint64m4_t test_vor_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4
   return vor_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1744,7 +1570,6 @@ vuint64m4_t test_vor_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4
   return vor_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1754,7 +1579,6 @@ vuint64m8_t test_vor_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_
   return vor_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vpopc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vpopc.c
index 661feb32d7818..e155e82301657 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vpopc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vpopc.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vpopc_m_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ unsigned long test_vpopc_m_b1(vbool1_t op1, size_t vl) {
   return vpopc_m_b1(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vpopc_m_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ unsigned long test_vpopc_m_b2(vbool2_t op1, size_t vl) {
   return vpopc_m_b2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vpopc_m_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ unsigned long test_vpopc_m_b4(vbool4_t op1, size_t vl) {
   return vpopc_m_b4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vpopc_m_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ unsigned long test_vpopc_m_b8(vbool8_t op1, size_t vl) {
   return vpopc_m_b8(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vpopc_m_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ unsigned long test_vpopc_m_b16(vbool16_t op1, size_t vl) {
   return vpopc_m_b16(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vpopc_m_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ unsigned long test_vpopc_m_b32(vbool32_t op1, size_t vl) {
   return vpopc_m_b32(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vpopc_m_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ unsigned long test_vpopc_m_b64(vbool64_t op1, size_t vl) {
   return vpopc_m_b64(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vpopc_m_b1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ unsigned long test_vpopc_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) {
   return vpopc_m_b1_m(mask, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vpopc_m_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ unsigned long test_vpopc_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) {
   return vpopc_m_b2_m(mask, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vpopc_m_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ unsigned long test_vpopc_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) {
   return vpopc_m_b4_m(mask, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vpopc_m_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ unsigned long test_vpopc_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) {
   return vpopc_m_b8_m(mask, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vpopc_m_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ unsigned long test_vpopc_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) {
   return vpopc_m_b16_m(mask, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vpopc_m_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ unsigned long test_vpopc_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) {
   return vpopc_m_b32_m(mask, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vpopc_m_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredand.c
index 0c7cd539c6cfd..2da8f15cdad75 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredand.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredand.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i8mf8_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vint8m1_t test_vredand_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector,
   return vredand_vs_i8mf8_i8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i8mf4_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -26,7 +24,6 @@ vint8m1_t test_vredand_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector,
   return vredand_vs_i8mf4_i8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i8mf2_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -37,7 +34,6 @@ vint8m1_t test_vredand_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector,
   return vredand_vs_i8mf2_i8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i8m1_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -48,7 +44,6 @@ vint8m1_t test_vredand_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector,
   return vredand_vs_i8m1_i8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i8m2_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -59,7 +54,6 @@ vint8m1_t test_vredand_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector,
   return vredand_vs_i8m2_i8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i8m4_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -70,7 +64,6 @@ vint8m1_t test_vredand_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector,
   return vredand_vs_i8m4_i8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i8m8_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -81,7 +74,6 @@ vint8m1_t test_vredand_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector,
   return vredand_vs_i8m8_i8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i16mf4_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -92,7 +84,6 @@ vint16m1_t test_vredand_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector,
   return vredand_vs_i16mf4_i16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i16mf2_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -103,7 +94,6 @@ vint16m1_t test_vredand_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector,
   return vredand_vs_i16mf2_i16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i16m1_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -114,7 +104,6 @@ vint16m1_t test_vredand_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector,
   return vredand_vs_i16m1_i16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i16m2_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -125,7 +114,6 @@ vint16m1_t test_vredand_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector,
   return vredand_vs_i16m2_i16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i16m4_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -136,7 +124,6 @@ vint16m1_t test_vredand_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector,
   return vredand_vs_i16m4_i16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i16m8_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -147,7 +134,6 @@ vint16m1_t test_vredand_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector,
   return vredand_vs_i16m8_i16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -158,7 +144,6 @@ vint32m1_t test_vredand_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector,
   return vredand_vs_i32mf2_i32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i32m1_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -169,7 +154,6 @@ vint32m1_t test_vredand_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector,
   return vredand_vs_i32m1_i32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i32m2_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -180,7 +164,6 @@ vint32m1_t test_vredand_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector,
   return vredand_vs_i32m2_i32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i32m4_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -191,7 +174,6 @@ vint32m1_t test_vredand_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector,
   return vredand_vs_i32m4_i32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i32m8_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -202,7 +184,6 @@ vint32m1_t test_vredand_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector,
   return vredand_vs_i32m8_i32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i64m1_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -213,7 +194,6 @@ vint64m1_t test_vredand_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector,
   return vredand_vs_i64m1_i64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i64m2_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -224,7 +204,6 @@ vint64m1_t test_vredand_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector,
   return vredand_vs_i64m2_i64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i64m4_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -235,7 +214,6 @@ vint64m1_t test_vredand_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector,
   return vredand_vs_i64m4_i64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i64m8_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -246,7 +224,6 @@ vint64m1_t test_vredand_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector,
   return vredand_vs_i64m8_i64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u8mf8_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -257,7 +234,6 @@ vuint8m1_t test_vredand_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector,
   return vredand_vs_u8mf8_u8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u8mf4_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -268,7 +244,6 @@ vuint8m1_t test_vredand_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector,
   return vredand_vs_u8mf4_u8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u8mf2_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -279,7 +254,6 @@ vuint8m1_t test_vredand_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector,
   return vredand_vs_u8mf2_u8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u8m1_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -290,7 +264,6 @@ vuint8m1_t test_vredand_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector,
   return vredand_vs_u8m1_u8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u8m2_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -301,7 +274,6 @@ vuint8m1_t test_vredand_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector,
   return vredand_vs_u8m2_u8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u8m4_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -312,7 +284,6 @@ vuint8m1_t test_vredand_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector,
   return vredand_vs_u8m4_u8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u8m8_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -323,7 +294,6 @@ vuint8m1_t test_vredand_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector,
   return vredand_vs_u8m8_u8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u16mf4_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -334,7 +304,6 @@ vuint16m1_t test_vredand_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector,
   return vredand_vs_u16mf4_u16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u16mf2_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -345,7 +314,6 @@ vuint16m1_t test_vredand_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector,
   return vredand_vs_u16mf2_u16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u16m1_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -356,7 +324,6 @@ vuint16m1_t test_vredand_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector,
   return vredand_vs_u16m1_u16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u16m2_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -367,7 +334,6 @@ vuint16m1_t test_vredand_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector,
   return vredand_vs_u16m2_u16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u16m4_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -378,7 +344,6 @@ vuint16m1_t test_vredand_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector,
   return vredand_vs_u16m4_u16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u16m8_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -389,7 +354,6 @@ vuint16m1_t test_vredand_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector,
   return vredand_vs_u16m8_u16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -400,7 +364,6 @@ vuint32m1_t test_vredand_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector,
   return vredand_vs_u32mf2_u32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u32m1_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -411,7 +374,6 @@ vuint32m1_t test_vredand_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector,
   return vredand_vs_u32m1_u32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u32m2_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -422,7 +384,6 @@ vuint32m1_t test_vredand_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector,
   return vredand_vs_u32m2_u32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u32m4_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -433,7 +394,6 @@ vuint32m1_t test_vredand_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector,
   return vredand_vs_u32m4_u32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u32m8_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -444,7 +404,6 @@ vuint32m1_t test_vredand_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector,
   return vredand_vs_u32m8_u32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u64m1_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -455,7 +414,6 @@ vuint64m1_t test_vredand_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector,
   return vredand_vs_u64m1_u64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u64m2_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -466,7 +424,6 @@ vuint64m1_t test_vredand_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector,
   return vredand_vs_u64m2_u64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u64m4_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -477,7 +434,6 @@ vuint64m1_t test_vredand_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector,
   return vredand_vs_u64m4_u64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u64m8_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -488,7 +444,6 @@ vuint64m1_t test_vredand_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector,
   return vredand_vs_u64m8_u64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i8mf8_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -500,7 +455,6 @@ vint8m1_t test_vredand_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst,
   return vredand_vs_i8mf8_i8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i8mf4_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -512,7 +466,6 @@ vint8m1_t test_vredand_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst,
   return vredand_vs_i8mf4_i8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i8mf2_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -524,7 +477,6 @@ vint8m1_t test_vredand_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst,
   return vredand_vs_i8mf2_i8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i8m1_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -536,7 +488,6 @@ vint8m1_t test_vredand_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst,
   return vredand_vs_i8m1_i8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i8m2_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -548,7 +499,6 @@ vint8m1_t test_vredand_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst,
   return vredand_vs_i8m2_i8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i8m4_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -560,7 +510,6 @@ vint8m1_t test_vredand_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst,
   return vredand_vs_i8m4_i8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i8m8_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -572,7 +521,6 @@ vint8m1_t test_vredand_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst,
   return vredand_vs_i8m8_i8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i16mf4_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -584,7 +532,6 @@ vint16m1_t test_vredand_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst,
   return vredand_vs_i16mf4_i16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i16mf2_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -596,7 +543,6 @@ vint16m1_t test_vredand_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst,
   return vredand_vs_i16mf2_i16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i16m1_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -608,7 +554,6 @@ vint16m1_t test_vredand_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst,
   return vredand_vs_i16m1_i16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i16m2_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -620,7 +565,6 @@ vint16m1_t test_vredand_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst,
   return vredand_vs_i16m2_i16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i16m4_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -632,7 +576,6 @@ vint16m1_t test_vredand_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst,
   return vredand_vs_i16m4_i16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i16m8_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -644,7 +587,6 @@ vint16m1_t test_vredand_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst,
   return vredand_vs_i16m8_i16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -656,7 +598,6 @@ vint32m1_t test_vredand_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst,
   return vredand_vs_i32mf2_i32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i32m1_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -668,7 +609,6 @@ vint32m1_t test_vredand_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst,
   return vredand_vs_i32m1_i32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i32m2_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -680,7 +620,6 @@ vint32m1_t test_vredand_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst,
   return vredand_vs_i32m2_i32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i32m4_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -692,7 +631,6 @@ vint32m1_t test_vredand_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst,
   return vredand_vs_i32m4_i32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i32m8_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -704,7 +642,6 @@ vint32m1_t test_vredand_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst,
   return vredand_vs_i32m8_i32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i64m1_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -716,7 +653,6 @@ vint64m1_t test_vredand_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst,
   return vredand_vs_i64m1_i64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i64m2_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -728,7 +664,6 @@ vint64m1_t test_vredand_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst,
   return vredand_vs_i64m2_i64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i64m4_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -740,7 +675,6 @@ vint64m1_t test_vredand_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst,
   return vredand_vs_i64m4_i64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i64m8_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -752,7 +686,6 @@ vint64m1_t test_vredand_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst,
   return vredand_vs_i64m8_i64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u8mf8_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -764,7 +697,6 @@ vuint8m1_t test_vredand_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst,
   return vredand_vs_u8mf8_u8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u8mf4_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -776,7 +708,6 @@ vuint8m1_t test_vredand_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst,
   return vredand_vs_u8mf4_u8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u8mf2_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -788,7 +719,6 @@ vuint8m1_t test_vredand_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst,
   return vredand_vs_u8mf2_u8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u8m1_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -800,7 +730,6 @@ vuint8m1_t test_vredand_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst,
   return vredand_vs_u8m1_u8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u8m2_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -812,7 +741,6 @@ vuint8m1_t test_vredand_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst,
   return vredand_vs_u8m2_u8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u8m4_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -824,7 +752,6 @@ vuint8m1_t test_vredand_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst,
   return vredand_vs_u8m4_u8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u8m8_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -836,7 +763,6 @@ vuint8m1_t test_vredand_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst,
   return vredand_vs_u8m8_u8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u16mf4_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -848,7 +774,6 @@ vuint16m1_t test_vredand_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst,
   return vredand_vs_u16mf4_u16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u16mf2_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -860,7 +785,6 @@ vuint16m1_t test_vredand_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst,
   return vredand_vs_u16mf2_u16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u16m1_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -872,7 +796,6 @@ vuint16m1_t test_vredand_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst,
   return vredand_vs_u16m1_u16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u16m2_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -884,7 +807,6 @@ vuint16m1_t test_vredand_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst,
   return vredand_vs_u16m2_u16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u16m4_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -896,7 +818,6 @@ vuint16m1_t test_vredand_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst,
   return vredand_vs_u16m4_u16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u16m8_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -908,7 +829,6 @@ vuint16m1_t test_vredand_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst,
   return vredand_vs_u16m8_u16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -920,7 +840,6 @@ vuint32m1_t test_vredand_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst,
   return vredand_vs_u32mf2_u32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u32m1_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -932,7 +851,6 @@ vuint32m1_t test_vredand_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst,
   return vredand_vs_u32m1_u32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u32m2_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -944,7 +862,6 @@ vuint32m1_t test_vredand_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst,
   return vredand_vs_u32m2_u32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u32m4_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -956,7 +873,6 @@ vuint32m1_t test_vredand_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst,
   return vredand_vs_u32m4_u32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u32m8_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -968,7 +884,6 @@ vuint32m1_t test_vredand_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst,
   return vredand_vs_u32m8_u32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u64m1_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -980,7 +895,6 @@ vuint64m1_t test_vredand_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst,
   return vredand_vs_u64m1_u64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u64m2_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -992,7 +906,6 @@ vuint64m1_t test_vredand_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst,
   return vredand_vs_u64m2_u64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u64m4_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1004,7 +917,6 @@ vuint64m1_t test_vredand_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst,
   return vredand_vs_u64m4_u64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u64m8_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredmax.c
index b44583537f992..c886985708326 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredmax.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredmax.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf8_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vint8m1_t test_vredmax_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector,
   return vredmax_vs_i8mf8_i8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf4_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -26,7 +24,6 @@ vint8m1_t test_vredmax_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector,
   return vredmax_vs_i8mf4_i8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf2_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -37,7 +34,6 @@ vint8m1_t test_vredmax_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector,
   return vredmax_vs_i8mf2_i8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8m1_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -48,7 +44,6 @@ vint8m1_t test_vredmax_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector,
   return vredmax_vs_i8m1_i8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8m2_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -59,7 +54,6 @@ vint8m1_t test_vredmax_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector,
   return vredmax_vs_i8m2_i8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8m4_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -70,7 +64,6 @@ vint8m1_t test_vredmax_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector,
   return vredmax_vs_i8m4_i8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8m8_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -81,7 +74,6 @@ vint8m1_t test_vredmax_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector,
   return vredmax_vs_i8m8_i8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i16mf4_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -92,7 +84,6 @@ vint16m1_t test_vredmax_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector,
   return vredmax_vs_i16mf4_i16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i16mf2_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -103,7 +94,6 @@ vint16m1_t test_vredmax_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector,
   return vredmax_vs_i16mf2_i16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i16m1_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -114,7 +104,6 @@ vint16m1_t test_vredmax_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector,
   return vredmax_vs_i16m1_i16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i16m2_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -125,7 +114,6 @@ vint16m1_t test_vredmax_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector,
   return vredmax_vs_i16m2_i16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i16m4_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -136,7 +124,6 @@ vint16m1_t test_vredmax_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector,
   return vredmax_vs_i16m4_i16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i16m8_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -147,7 +134,6 @@ vint16m1_t test_vredmax_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector,
   return vredmax_vs_i16m8_i16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -158,7 +144,6 @@ vint32m1_t test_vredmax_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector,
   return vredmax_vs_i32mf2_i32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i32m1_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -169,7 +154,6 @@ vint32m1_t test_vredmax_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector,
   return vredmax_vs_i32m1_i32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i32m2_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -180,7 +164,6 @@ vint32m1_t test_vredmax_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector,
   return vredmax_vs_i32m2_i32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i32m4_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -191,7 +174,6 @@ vint32m1_t test_vredmax_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector,
   return vredmax_vs_i32m4_i32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i32m8_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -202,7 +184,6 @@ vint32m1_t test_vredmax_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector,
   return vredmax_vs_i32m8_i32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i64m1_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -213,7 +194,6 @@ vint64m1_t test_vredmax_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector,
   return vredmax_vs_i64m1_i64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i64m2_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -224,7 +204,6 @@ vint64m1_t test_vredmax_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector,
   return vredmax_vs_i64m2_i64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i64m4_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -235,7 +214,6 @@ vint64m1_t test_vredmax_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector,
   return vredmax_vs_i64m4_i64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i64m8_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -246,7 +224,6 @@ vint64m1_t test_vredmax_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector,
   return vredmax_vs_i64m8_i64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf8_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -257,7 +234,6 @@ vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector,
   return vredmaxu_vs_u8mf8_u8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf4_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -268,7 +244,6 @@ vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector,
   return vredmaxu_vs_u8mf4_u8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf2_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -279,7 +254,6 @@ vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector,
   return vredmaxu_vs_u8mf2_u8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m1_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -290,7 +264,6 @@ vuint8m1_t test_vredmaxu_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector,
   return vredmaxu_vs_u8m1_u8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m2_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -301,7 +274,6 @@ vuint8m1_t test_vredmaxu_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector,
   return vredmaxu_vs_u8m2_u8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m4_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -312,7 +284,6 @@ vuint8m1_t test_vredmaxu_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector,
   return vredmaxu_vs_u8m4_u8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m8_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -323,7 +294,6 @@ vuint8m1_t test_vredmaxu_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector,
   return vredmaxu_vs_u8m8_u8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf4_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -334,7 +304,6 @@ vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector,
   return vredmaxu_vs_u16mf4_u16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf2_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -345,7 +314,6 @@ vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector,
   return vredmaxu_vs_u16mf2_u16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m1_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -356,7 +324,6 @@ vuint16m1_t test_vredmaxu_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector,
   return vredmaxu_vs_u16m1_u16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m2_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -367,7 +334,6 @@ vuint16m1_t test_vredmaxu_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector,
   return vredmaxu_vs_u16m2_u16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m4_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -378,7 +344,6 @@ vuint16m1_t test_vredmaxu_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector,
   return vredmaxu_vs_u16m4_u16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m8_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -389,7 +354,6 @@ vuint16m1_t test_vredmaxu_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector,
   return vredmaxu_vs_u16m8_u16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -400,7 +364,6 @@ vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector,
   return vredmaxu_vs_u32mf2_u32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m1_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -411,7 +374,6 @@ vuint32m1_t test_vredmaxu_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector,
   return vredmaxu_vs_u32m1_u32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m2_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -422,7 +384,6 @@ vuint32m1_t test_vredmaxu_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector,
   return vredmaxu_vs_u32m2_u32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m4_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -433,7 +394,6 @@ vuint32m1_t test_vredmaxu_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector,
   return vredmaxu_vs_u32m4_u32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m8_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -444,7 +404,6 @@ vuint32m1_t test_vredmaxu_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector,
   return vredmaxu_vs_u32m8_u32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m1_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -455,7 +414,6 @@ vuint64m1_t test_vredmaxu_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector,
   return vredmaxu_vs_u64m1_u64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m2_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -466,7 +424,6 @@ vuint64m1_t test_vredmaxu_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector,
   return vredmaxu_vs_u64m2_u64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m4_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -477,7 +434,6 @@ vuint64m1_t test_vredmaxu_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector,
   return vredmaxu_vs_u64m4_u64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m8_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -488,7 +444,6 @@ vuint64m1_t test_vredmaxu_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector,
   return vredmaxu_vs_u64m8_u64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf8_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -500,7 +455,6 @@ vint8m1_t test_vredmax_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst,
   return vredmax_vs_i8mf8_i8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf4_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -512,7 +466,6 @@ vint8m1_t test_vredmax_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst,
   return vredmax_vs_i8mf4_i8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf2_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -524,7 +477,6 @@ vint8m1_t test_vredmax_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst,
   return vredmax_vs_i8mf2_i8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8m1_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -536,7 +488,6 @@ vint8m1_t test_vredmax_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst,
   return vredmax_vs_i8m1_i8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8m2_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -548,7 +499,6 @@ vint8m1_t test_vredmax_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst,
   return vredmax_vs_i8m2_i8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8m4_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -560,7 +510,6 @@ vint8m1_t test_vredmax_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst,
   return vredmax_vs_i8m4_i8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8m8_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -572,7 +521,6 @@ vint8m1_t test_vredmax_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst,
   return vredmax_vs_i8m8_i8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i16mf4_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -584,7 +532,6 @@ vint16m1_t test_vredmax_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst,
   return vredmax_vs_i16mf4_i16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i16mf2_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -596,7 +543,6 @@ vint16m1_t test_vredmax_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst,
   return vredmax_vs_i16mf2_i16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i16m1_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -608,7 +554,6 @@ vint16m1_t test_vredmax_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst,
   return vredmax_vs_i16m1_i16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i16m2_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -620,7 +565,6 @@ vint16m1_t test_vredmax_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst,
   return vredmax_vs_i16m2_i16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i16m4_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -632,7 +576,6 @@ vint16m1_t test_vredmax_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst,
   return vredmax_vs_i16m4_i16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i16m8_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -644,7 +587,6 @@ vint16m1_t test_vredmax_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst,
   return vredmax_vs_i16m8_i16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -656,7 +598,6 @@ vint32m1_t test_vredmax_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst,
   return vredmax_vs_i32mf2_i32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i32m1_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -668,7 +609,6 @@ vint32m1_t test_vredmax_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst,
   return vredmax_vs_i32m1_i32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i32m2_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -680,7 +620,6 @@ vint32m1_t test_vredmax_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst,
   return vredmax_vs_i32m2_i32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i32m4_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -692,7 +631,6 @@ vint32m1_t test_vredmax_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst,
   return vredmax_vs_i32m4_i32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i32m8_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -704,7 +642,6 @@ vint32m1_t test_vredmax_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst,
   return vredmax_vs_i32m8_i32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i64m1_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -716,7 +653,6 @@ vint64m1_t test_vredmax_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst,
   return vredmax_vs_i64m1_i64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i64m2_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -728,7 +664,6 @@ vint64m1_t test_vredmax_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst,
   return vredmax_vs_i64m2_i64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i64m4_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -740,7 +675,6 @@ vint64m1_t test_vredmax_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst,
   return vredmax_vs_i64m4_i64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i64m8_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -752,7 +686,6 @@ vint64m1_t test_vredmax_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst,
   return vredmax_vs_i64m8_i64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf8_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -764,7 +697,6 @@ vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst,
   return vredmaxu_vs_u8mf8_u8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf4_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -776,7 +708,6 @@ vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst,
   return vredmaxu_vs_u8mf4_u8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf2_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -788,7 +719,6 @@ vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst,
   return vredmaxu_vs_u8mf2_u8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m1_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -800,7 +730,6 @@ vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst,
   return vredmaxu_vs_u8m1_u8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m2_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -812,7 +741,6 @@ vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst,
   return vredmaxu_vs_u8m2_u8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m4_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -824,7 +752,6 @@ vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst,
   return vredmaxu_vs_u8m4_u8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m8_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -836,7 +763,6 @@ vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst,
   return vredmaxu_vs_u8m8_u8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf4_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -848,7 +774,6 @@ vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst,
   return vredmaxu_vs_u16mf4_u16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf2_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -860,7 +785,6 @@ vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst,
   return vredmaxu_vs_u16mf2_u16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m1_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -872,7 +796,6 @@ vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst,
   return vredmaxu_vs_u16m1_u16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m2_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -884,7 +807,6 @@ vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst,
   return vredmaxu_vs_u16m2_u16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m4_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -896,7 +818,6 @@ vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst,
   return vredmaxu_vs_u16m4_u16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m8_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -908,7 +829,6 @@ vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst,
   return vredmaxu_vs_u16m8_u16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -920,7 +840,6 @@ vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst,
   return vredmaxu_vs_u32mf2_u32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m1_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -932,7 +851,6 @@ vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst,
   return vredmaxu_vs_u32m1_u32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m2_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -944,7 +862,6 @@ vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst,
   return vredmaxu_vs_u32m2_u32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m4_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -956,7 +873,6 @@ vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst,
   return vredmaxu_vs_u32m4_u32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m8_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -968,7 +884,6 @@ vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst,
   return vredmaxu_vs_u32m8_u32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m1_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -980,7 +895,6 @@ vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst,
   return vredmaxu_vs_u64m1_u64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m2_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -992,7 +906,6 @@ vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst,
   return vredmaxu_vs_u64m2_u64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m4_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1004,7 +917,6 @@ vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst,
   return vredmaxu_vs_u64m4_u64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m8_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredmin.c
index 5b8c52b6da9c6..8cb4711e6b823 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredmin.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredmin.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf8_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vint8m1_t test_vredmin_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector,
   return vredmin_vs_i8mf8_i8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf4_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -26,7 +24,6 @@ vint8m1_t test_vredmin_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector,
   return vredmin_vs_i8mf4_i8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf2_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -37,7 +34,6 @@ vint8m1_t test_vredmin_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector,
   return vredmin_vs_i8mf2_i8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8m1_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -48,7 +44,6 @@ vint8m1_t test_vredmin_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector,
   return vredmin_vs_i8m1_i8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8m2_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -59,7 +54,6 @@ vint8m1_t test_vredmin_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector,
   return vredmin_vs_i8m2_i8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8m4_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -70,7 +64,6 @@ vint8m1_t test_vredmin_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector,
   return vredmin_vs_i8m4_i8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8m8_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -81,7 +74,6 @@ vint8m1_t test_vredmin_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector,
   return vredmin_vs_i8m8_i8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16mf4_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -92,7 +84,6 @@ vint16m1_t test_vredmin_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector,
   return vredmin_vs_i16mf4_i16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16mf2_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -103,7 +94,6 @@ vint16m1_t test_vredmin_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector,
   return vredmin_vs_i16mf2_i16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16m1_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -114,7 +104,6 @@ vint16m1_t test_vredmin_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector,
   return vredmin_vs_i16m1_i16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16m2_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -125,7 +114,6 @@ vint16m1_t test_vredmin_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector,
   return vredmin_vs_i16m2_i16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16m4_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -136,7 +124,6 @@ vint16m1_t test_vredmin_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector,
   return vredmin_vs_i16m4_i16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16m8_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -147,7 +134,6 @@ vint16m1_t test_vredmin_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector,
   return vredmin_vs_i16m8_i16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -158,7 +144,6 @@ vint32m1_t test_vredmin_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector,
   return vredmin_vs_i32mf2_i32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i32m1_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -169,7 +154,6 @@ vint32m1_t test_vredmin_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector,
   return vredmin_vs_i32m1_i32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i32m2_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -180,7 +164,6 @@ vint32m1_t test_vredmin_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector,
   return vredmin_vs_i32m2_i32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i32m4_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -191,7 +174,6 @@ vint32m1_t test_vredmin_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector,
   return vredmin_vs_i32m4_i32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i32m8_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -202,7 +184,6 @@ vint32m1_t test_vredmin_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector,
   return vredmin_vs_i32m8_i32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i64m1_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -213,7 +194,6 @@ vint64m1_t test_vredmin_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector,
   return vredmin_vs_i64m1_i64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i64m2_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -224,7 +204,6 @@ vint64m1_t test_vredmin_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector,
   return vredmin_vs_i64m2_i64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i64m4_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -235,7 +214,6 @@ vint64m1_t test_vredmin_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector,
   return vredmin_vs_i64m4_i64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i64m8_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -246,7 +224,6 @@ vint64m1_t test_vredmin_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector,
   return vredmin_vs_i64m8_i64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf8_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -257,7 +234,6 @@ vuint8m1_t test_vredminu_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector,
   return vredminu_vs_u8mf8_u8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf4_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -268,7 +244,6 @@ vuint8m1_t test_vredminu_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector,
   return vredminu_vs_u8mf4_u8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf2_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -279,7 +254,6 @@ vuint8m1_t test_vredminu_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector,
   return vredminu_vs_u8mf2_u8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8m1_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -290,7 +264,6 @@ vuint8m1_t test_vredminu_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector,
   return vredminu_vs_u8m1_u8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8m2_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -301,7 +274,6 @@ vuint8m1_t test_vredminu_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector,
   return vredminu_vs_u8m2_u8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8m4_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -312,7 +284,6 @@ vuint8m1_t test_vredminu_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector,
   return vredminu_vs_u8m4_u8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8m8_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -323,7 +294,6 @@ vuint8m1_t test_vredminu_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector,
   return vredminu_vs_u8m8_u8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16mf4_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -334,7 +304,6 @@ vuint16m1_t test_vredminu_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector,
   return vredminu_vs_u16mf4_u16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16mf2_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -345,7 +314,6 @@ vuint16m1_t test_vredminu_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector,
   return vredminu_vs_u16mf2_u16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16m1_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -356,7 +324,6 @@ vuint16m1_t test_vredminu_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector,
   return vredminu_vs_u16m1_u16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16m2_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -367,7 +334,6 @@ vuint16m1_t test_vredminu_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector,
   return vredminu_vs_u16m2_u16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16m4_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -378,7 +344,6 @@ vuint16m1_t test_vredminu_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector,
   return vredminu_vs_u16m4_u16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16m8_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -389,7 +354,6 @@ vuint16m1_t test_vredminu_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector,
   return vredminu_vs_u16m8_u16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -400,7 +364,6 @@ vuint32m1_t test_vredminu_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector,
   return vredminu_vs_u32mf2_u32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u32m1_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -411,7 +374,6 @@ vuint32m1_t test_vredminu_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector,
   return vredminu_vs_u32m1_u32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u32m2_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -422,7 +384,6 @@ vuint32m1_t test_vredminu_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector,
   return vredminu_vs_u32m2_u32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u32m4_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -433,7 +394,6 @@ vuint32m1_t test_vredminu_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector,
   return vredminu_vs_u32m4_u32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u32m8_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -444,7 +404,6 @@ vuint32m1_t test_vredminu_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector,
   return vredminu_vs_u32m8_u32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u64m1_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -455,7 +414,6 @@ vuint64m1_t test_vredminu_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector,
   return vredminu_vs_u64m1_u64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u64m2_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -466,7 +424,6 @@ vuint64m1_t test_vredminu_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector,
   return vredminu_vs_u64m2_u64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u64m4_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -477,7 +434,6 @@ vuint64m1_t test_vredminu_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector,
   return vredminu_vs_u64m4_u64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u64m8_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -488,7 +444,6 @@ vuint64m1_t test_vredminu_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector,
   return vredminu_vs_u64m8_u64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf8_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -500,7 +455,6 @@ vint8m1_t test_vredmin_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst,
   return vredmin_vs_i8mf8_i8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf4_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -512,7 +466,6 @@ vint8m1_t test_vredmin_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst,
   return vredmin_vs_i8mf4_i8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf2_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -524,7 +477,6 @@ vint8m1_t test_vredmin_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst,
   return vredmin_vs_i8mf2_i8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8m1_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -536,7 +488,6 @@ vint8m1_t test_vredmin_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst,
   return vredmin_vs_i8m1_i8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8m2_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -548,7 +499,6 @@ vint8m1_t test_vredmin_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst,
   return vredmin_vs_i8m2_i8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8m4_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -560,7 +510,6 @@ vint8m1_t test_vredmin_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst,
   return vredmin_vs_i8m4_i8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8m8_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -572,7 +521,6 @@ vint8m1_t test_vredmin_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst,
   return vredmin_vs_i8m8_i8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16mf4_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -584,7 +532,6 @@ vint16m1_t test_vredmin_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst,
   return vredmin_vs_i16mf4_i16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16mf2_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -596,7 +543,6 @@ vint16m1_t test_vredmin_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst,
   return vredmin_vs_i16mf2_i16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16m1_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -608,7 +554,6 @@ vint16m1_t test_vredmin_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst,
   return vredmin_vs_i16m1_i16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16m2_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -620,7 +565,6 @@ vint16m1_t test_vredmin_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst,
   return vredmin_vs_i16m2_i16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16m4_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -632,7 +576,6 @@ vint16m1_t test_vredmin_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst,
   return vredmin_vs_i16m4_i16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16m8_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -644,7 +587,6 @@ vint16m1_t test_vredmin_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst,
   return vredmin_vs_i16m8_i16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -656,7 +598,6 @@ vint32m1_t test_vredmin_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst,
   return vredmin_vs_i32mf2_i32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i32m1_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -668,7 +609,6 @@ vint32m1_t test_vredmin_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst,
   return vredmin_vs_i32m1_i32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i32m2_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -680,7 +620,6 @@ vint32m1_t test_vredmin_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst,
   return vredmin_vs_i32m2_i32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i32m4_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -692,7 +631,6 @@ vint32m1_t test_vredmin_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst,
   return vredmin_vs_i32m4_i32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i32m8_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -704,7 +642,6 @@ vint32m1_t test_vredmin_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst,
   return vredmin_vs_i32m8_i32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i64m1_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -716,7 +653,6 @@ vint64m1_t test_vredmin_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst,
   return vredmin_vs_i64m1_i64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i64m2_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -728,7 +664,6 @@ vint64m1_t test_vredmin_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst,
   return vredmin_vs_i64m2_i64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i64m4_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -740,7 +675,6 @@ vint64m1_t test_vredmin_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst,
   return vredmin_vs_i64m4_i64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i64m8_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -752,7 +686,6 @@ vint64m1_t test_vredmin_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst,
   return vredmin_vs_i64m8_i64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf8_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -764,7 +697,6 @@ vuint8m1_t test_vredminu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst,
   return vredminu_vs_u8mf8_u8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf4_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -776,7 +708,6 @@ vuint8m1_t test_vredminu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst,
   return vredminu_vs_u8mf4_u8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf2_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -788,7 +719,6 @@ vuint8m1_t test_vredminu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst,
   return vredminu_vs_u8mf2_u8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8m1_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -800,7 +730,6 @@ vuint8m1_t test_vredminu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst,
   return vredminu_vs_u8m1_u8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8m2_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -812,7 +741,6 @@ vuint8m1_t test_vredminu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst,
   return vredminu_vs_u8m2_u8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8m4_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -824,7 +752,6 @@ vuint8m1_t test_vredminu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst,
   return vredminu_vs_u8m4_u8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8m8_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -836,7 +763,6 @@ vuint8m1_t test_vredminu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst,
   return vredminu_vs_u8m8_u8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16mf4_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -848,7 +774,6 @@ vuint16m1_t test_vredminu_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst,
   return vredminu_vs_u16mf4_u16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16mf2_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -860,7 +785,6 @@ vuint16m1_t test_vredminu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst,
   return vredminu_vs_u16mf2_u16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16m1_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -872,7 +796,6 @@ vuint16m1_t test_vredminu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst,
   return vredminu_vs_u16m1_u16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16m2_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -884,7 +807,6 @@ vuint16m1_t test_vredminu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst,
   return vredminu_vs_u16m2_u16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16m4_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -896,7 +818,6 @@ vuint16m1_t test_vredminu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst,
   return vredminu_vs_u16m4_u16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16m8_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -908,7 +829,6 @@ vuint16m1_t test_vredminu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst,
   return vredminu_vs_u16m8_u16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -920,7 +840,6 @@ vuint32m1_t test_vredminu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst,
   return vredminu_vs_u32mf2_u32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u32m1_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -932,7 +851,6 @@ vuint32m1_t test_vredminu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst,
   return vredminu_vs_u32m1_u32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u32m2_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -944,7 +862,6 @@ vuint32m1_t test_vredminu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst,
   return vredminu_vs_u32m2_u32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u32m4_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -956,7 +873,6 @@ vuint32m1_t test_vredminu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst,
   return vredminu_vs_u32m4_u32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u32m8_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -968,7 +884,6 @@ vuint32m1_t test_vredminu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst,
   return vredminu_vs_u32m8_u32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u64m1_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -980,7 +895,6 @@ vuint64m1_t test_vredminu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst,
   return vredminu_vs_u64m1_u64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u64m2_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -992,7 +906,6 @@ vuint64m1_t test_vredminu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst,
   return vredminu_vs_u64m2_u64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u64m4_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1004,7 +917,6 @@ vuint64m1_t test_vredminu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst,
   return vredminu_vs_u64m4_u64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u64m8_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredor.c
index 65e1ecd70ff16..45b23ac73553c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredor.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i8mf8_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vint8m1_t test_vredor_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector,
   return vredor_vs_i8mf8_i8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i8mf4_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -26,7 +24,6 @@ vint8m1_t test_vredor_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector,
   return vredor_vs_i8mf4_i8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i8mf2_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -37,7 +34,6 @@ vint8m1_t test_vredor_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector,
   return vredor_vs_i8mf2_i8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i8m1_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -48,7 +44,6 @@ vint8m1_t test_vredor_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector,
   return vredor_vs_i8m1_i8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i8m2_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -59,7 +54,6 @@ vint8m1_t test_vredor_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector,
   return vredor_vs_i8m2_i8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i8m4_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -70,7 +64,6 @@ vint8m1_t test_vredor_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector,
   return vredor_vs_i8m4_i8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i8m8_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -81,7 +74,6 @@ vint8m1_t test_vredor_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector,
   return vredor_vs_i8m8_i8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i16mf4_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -92,7 +84,6 @@ vint16m1_t test_vredor_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector,
   return vredor_vs_i16mf4_i16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i16mf2_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -103,7 +94,6 @@ vint16m1_t test_vredor_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector,
   return vredor_vs_i16mf2_i16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i16m1_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -114,7 +104,6 @@ vint16m1_t test_vredor_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector,
   return vredor_vs_i16m1_i16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i16m2_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -125,7 +114,6 @@ vint16m1_t test_vredor_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector,
   return vredor_vs_i16m2_i16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i16m4_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -136,7 +124,6 @@ vint16m1_t test_vredor_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector,
   return vredor_vs_i16m4_i16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i16m8_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -147,7 +134,6 @@ vint16m1_t test_vredor_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector,
   return vredor_vs_i16m8_i16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -158,7 +144,6 @@ vint32m1_t test_vredor_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector,
   return vredor_vs_i32mf2_i32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i32m1_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -169,7 +154,6 @@ vint32m1_t test_vredor_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector,
   return vredor_vs_i32m1_i32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i32m2_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -180,7 +164,6 @@ vint32m1_t test_vredor_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector,
   return vredor_vs_i32m2_i32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i32m4_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -191,7 +174,6 @@ vint32m1_t test_vredor_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector,
   return vredor_vs_i32m4_i32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i32m8_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -202,7 +184,6 @@ vint32m1_t test_vredor_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector,
   return vredor_vs_i32m8_i32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i64m1_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -213,7 +194,6 @@ vint64m1_t test_vredor_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector,
   return vredor_vs_i64m1_i64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i64m2_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -224,7 +204,6 @@ vint64m1_t test_vredor_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector,
   return vredor_vs_i64m2_i64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i64m4_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -235,7 +214,6 @@ vint64m1_t test_vredor_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector,
   return vredor_vs_i64m4_i64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i64m8_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -246,7 +224,6 @@ vint64m1_t test_vredor_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector,
   return vredor_vs_i64m8_i64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u8mf8_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -257,7 +234,6 @@ vuint8m1_t test_vredor_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector,
   return vredor_vs_u8mf8_u8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u8mf4_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -268,7 +244,6 @@ vuint8m1_t test_vredor_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector,
   return vredor_vs_u8mf4_u8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u8mf2_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -279,7 +254,6 @@ vuint8m1_t test_vredor_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector,
   return vredor_vs_u8mf2_u8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u8m1_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -290,7 +264,6 @@ vuint8m1_t test_vredor_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector,
   return vredor_vs_u8m1_u8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u8m2_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -301,7 +274,6 @@ vuint8m1_t test_vredor_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector,
   return vredor_vs_u8m2_u8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u8m4_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -312,7 +284,6 @@ vuint8m1_t test_vredor_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector,
   return vredor_vs_u8m4_u8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u8m8_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -323,7 +294,6 @@ vuint8m1_t test_vredor_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector,
   return vredor_vs_u8m8_u8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u16mf4_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -334,7 +304,6 @@ vuint16m1_t test_vredor_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector,
   return vredor_vs_u16mf4_u16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u16mf2_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -345,7 +314,6 @@ vuint16m1_t test_vredor_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector,
   return vredor_vs_u16mf2_u16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u16m1_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -356,7 +324,6 @@ vuint16m1_t test_vredor_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector,
   return vredor_vs_u16m1_u16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u16m2_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -367,7 +334,6 @@ vuint16m1_t test_vredor_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector,
   return vredor_vs_u16m2_u16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u16m4_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -378,7 +344,6 @@ vuint16m1_t test_vredor_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector,
   return vredor_vs_u16m4_u16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u16m8_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -389,7 +354,6 @@ vuint16m1_t test_vredor_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector,
   return vredor_vs_u16m8_u16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -400,7 +364,6 @@ vuint32m1_t test_vredor_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector,
   return vredor_vs_u32mf2_u32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u32m1_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -411,7 +374,6 @@ vuint32m1_t test_vredor_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector,
   return vredor_vs_u32m1_u32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u32m2_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -422,7 +384,6 @@ vuint32m1_t test_vredor_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector,
   return vredor_vs_u32m2_u32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u32m4_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -433,7 +394,6 @@ vuint32m1_t test_vredor_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector,
   return vredor_vs_u32m4_u32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u32m8_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -444,7 +404,6 @@ vuint32m1_t test_vredor_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector,
   return vredor_vs_u32m8_u32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u64m1_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -455,7 +414,6 @@ vuint64m1_t test_vredor_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector,
   return vredor_vs_u64m1_u64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u64m2_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -466,7 +424,6 @@ vuint64m1_t test_vredor_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector,
   return vredor_vs_u64m2_u64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u64m4_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -477,7 +434,6 @@ vuint64m1_t test_vredor_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector,
   return vredor_vs_u64m4_u64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u64m8_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -488,7 +444,6 @@ vuint64m1_t test_vredor_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector,
   return vredor_vs_u64m8_u64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i8mf8_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -500,7 +455,6 @@ vint8m1_t test_vredor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst,
   return vredor_vs_i8mf8_i8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i8mf4_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -512,7 +466,6 @@ vint8m1_t test_vredor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst,
   return vredor_vs_i8mf4_i8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i8mf2_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -524,7 +477,6 @@ vint8m1_t test_vredor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst,
   return vredor_vs_i8mf2_i8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i8m1_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -536,7 +488,6 @@ vint8m1_t test_vredor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst,
   return vredor_vs_i8m1_i8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i8m2_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -548,7 +499,6 @@ vint8m1_t test_vredor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst,
   return vredor_vs_i8m2_i8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i8m4_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -560,7 +510,6 @@ vint8m1_t test_vredor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst,
   return vredor_vs_i8m4_i8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i8m8_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -572,7 +521,6 @@ vint8m1_t test_vredor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst,
   return vredor_vs_i8m8_i8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i16mf4_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -584,7 +532,6 @@ vint16m1_t test_vredor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst,
   return vredor_vs_i16mf4_i16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i16mf2_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -596,7 +543,6 @@ vint16m1_t test_vredor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst,
   return vredor_vs_i16mf2_i16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i16m1_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -608,7 +554,6 @@ vint16m1_t test_vredor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst,
   return vredor_vs_i16m1_i16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i16m2_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -620,7 +565,6 @@ vint16m1_t test_vredor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst,
   return vredor_vs_i16m2_i16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i16m4_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -632,7 +576,6 @@ vint16m1_t test_vredor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst,
   return vredor_vs_i16m4_i16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i16m8_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -644,7 +587,6 @@ vint16m1_t test_vredor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst,
   return vredor_vs_i16m8_i16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -656,7 +598,6 @@ vint32m1_t test_vredor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst,
   return vredor_vs_i32mf2_i32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i32m1_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -668,7 +609,6 @@ vint32m1_t test_vredor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst,
   return vredor_vs_i32m1_i32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i32m2_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -680,7 +620,6 @@ vint32m1_t test_vredor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst,
   return vredor_vs_i32m2_i32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i32m4_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -692,7 +631,6 @@ vint32m1_t test_vredor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst,
   return vredor_vs_i32m4_i32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i32m8_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -704,7 +642,6 @@ vint32m1_t test_vredor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst,
   return vredor_vs_i32m8_i32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i64m1_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -716,7 +653,6 @@ vint64m1_t test_vredor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst,
   return vredor_vs_i64m1_i64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i64m2_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -728,7 +664,6 @@ vint64m1_t test_vredor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst,
   return vredor_vs_i64m2_i64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i64m4_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -740,7 +675,6 @@ vint64m1_t test_vredor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst,
   return vredor_vs_i64m4_i64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i64m8_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -752,7 +686,6 @@ vint64m1_t test_vredor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst,
   return vredor_vs_i64m8_i64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u8mf8_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -764,7 +697,6 @@ vuint8m1_t test_vredor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst,
   return vredor_vs_u8mf8_u8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u8mf4_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -776,7 +708,6 @@ vuint8m1_t test_vredor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst,
   return vredor_vs_u8mf4_u8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u8mf2_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -788,7 +719,6 @@ vuint8m1_t test_vredor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst,
   return vredor_vs_u8mf2_u8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u8m1_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -800,7 +730,6 @@ vuint8m1_t test_vredor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst,
   return vredor_vs_u8m1_u8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u8m2_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -812,7 +741,6 @@ vuint8m1_t test_vredor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst,
   return vredor_vs_u8m2_u8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u8m4_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -824,7 +752,6 @@ vuint8m1_t test_vredor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst,
   return vredor_vs_u8m4_u8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u8m8_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -836,7 +763,6 @@ vuint8m1_t test_vredor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst,
   return vredor_vs_u8m8_u8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u16mf4_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -848,7 +774,6 @@ vuint16m1_t test_vredor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst,
   return vredor_vs_u16mf4_u16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u16mf2_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -860,7 +785,6 @@ vuint16m1_t test_vredor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst,
   return vredor_vs_u16mf2_u16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u16m1_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -872,7 +796,6 @@ vuint16m1_t test_vredor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst,
   return vredor_vs_u16m1_u16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u16m2_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -884,7 +807,6 @@ vuint16m1_t test_vredor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst,
   return vredor_vs_u16m2_u16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u16m4_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -896,7 +818,6 @@ vuint16m1_t test_vredor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst,
   return vredor_vs_u16m4_u16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u16m8_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -908,7 +829,6 @@ vuint16m1_t test_vredor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst,
   return vredor_vs_u16m8_u16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -920,7 +840,6 @@ vuint32m1_t test_vredor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst,
   return vredor_vs_u32mf2_u32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u32m1_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -932,7 +851,6 @@ vuint32m1_t test_vredor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst,
   return vredor_vs_u32m1_u32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u32m2_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -944,7 +862,6 @@ vuint32m1_t test_vredor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst,
   return vredor_vs_u32m2_u32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u32m4_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -956,7 +873,6 @@ vuint32m1_t test_vredor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst,
   return vredor_vs_u32m4_u32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u32m8_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -968,7 +884,6 @@ vuint32m1_t test_vredor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst,
   return vredor_vs_u32m8_u32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u64m1_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -980,7 +895,6 @@ vuint64m1_t test_vredor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst,
   return vredor_vs_u64m1_u64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u64m2_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -992,7 +906,6 @@ vuint64m1_t test_vredor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst,
   return vredor_vs_u64m2_u64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u64m4_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1004,7 +917,6 @@ vuint64m1_t test_vredor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst,
   return vredor_vs_u64m4_u64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u64m8_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
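
(For context, a minimal sketch of the full shape of one of the tests touched above, pieced together from the signature fragment and CHECK lines in the vredor.c hunks; the trailing scalar and vl parameters are assumed, since the diff context cuts the signature off:)

    #include <riscv_vector.h>

    // CHECK-RV64-LABEL: @test_vredor_vs_i64m1_i64m1(
    // CHECK-RV64-NEXT:  entry:
    // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
    vint64m1_t test_vredor_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector,
                                          vint64m1_t scalar, size_t vl) {
      // Element 0 of the result is scalar[0] OR-reduced with vector[0..vl-1];
      // dst supplies the remaining (tail) elements.
      return vredor_vs_i64m1_i64m1(dst, vector, scalar, vl);
    }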

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredsum.c
index efdf8bd397758..004bbeca5533b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredsum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredsum.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf8_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vint8m1_t test_vredsum_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector,
   return vredsum_vs_i8mf8_i8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf4_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -26,7 +24,6 @@ vint8m1_t test_vredsum_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector,
   return vredsum_vs_i8mf4_i8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf2_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -37,7 +34,6 @@ vint8m1_t test_vredsum_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector,
   return vredsum_vs_i8mf2_i8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i8m1_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -48,7 +44,6 @@ vint8m1_t test_vredsum_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector,
   return vredsum_vs_i8m1_i8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i8m2_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -59,7 +54,6 @@ vint8m1_t test_vredsum_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector,
   return vredsum_vs_i8m2_i8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i8m4_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -70,7 +64,6 @@ vint8m1_t test_vredsum_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector,
   return vredsum_vs_i8m4_i8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i8m8_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -81,7 +74,6 @@ vint8m1_t test_vredsum_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector,
   return vredsum_vs_i8m8_i8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i16mf4_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -92,7 +84,6 @@ vint16m1_t test_vredsum_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector,
   return vredsum_vs_i16mf4_i16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i16mf2_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -103,7 +94,6 @@ vint16m1_t test_vredsum_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector,
   return vredsum_vs_i16mf2_i16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i16m1_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -114,7 +104,6 @@ vint16m1_t test_vredsum_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector,
   return vredsum_vs_i16m1_i16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i16m2_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -125,7 +114,6 @@ vint16m1_t test_vredsum_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector,
   return vredsum_vs_i16m2_i16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i16m4_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -136,7 +124,6 @@ vint16m1_t test_vredsum_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector,
   return vredsum_vs_i16m4_i16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i16m8_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -147,7 +134,6 @@ vint16m1_t test_vredsum_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector,
   return vredsum_vs_i16m8_i16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -158,7 +144,6 @@ vint32m1_t test_vredsum_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector,
   return vredsum_vs_i32mf2_i32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i32m1_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -169,7 +154,6 @@ vint32m1_t test_vredsum_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector,
   return vredsum_vs_i32m1_i32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i32m2_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -180,7 +164,6 @@ vint32m1_t test_vredsum_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector,
   return vredsum_vs_i32m2_i32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i32m4_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -191,7 +174,6 @@ vint32m1_t test_vredsum_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector,
   return vredsum_vs_i32m4_i32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i32m8_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -202,7 +184,6 @@ vint32m1_t test_vredsum_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector,
   return vredsum_vs_i32m8_i32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i64m1_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -213,7 +194,6 @@ vint64m1_t test_vredsum_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector,
   return vredsum_vs_i64m1_i64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i64m2_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -224,7 +204,6 @@ vint64m1_t test_vredsum_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector,
   return vredsum_vs_i64m2_i64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i64m4_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -235,7 +214,6 @@ vint64m1_t test_vredsum_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector,
   return vredsum_vs_i64m4_i64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i64m8_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -246,7 +224,6 @@ vint64m1_t test_vredsum_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector,
   return vredsum_vs_i64m8_i64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf8_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -257,7 +234,6 @@ vuint8m1_t test_vredsum_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector,
   return vredsum_vs_u8mf8_u8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf4_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -268,7 +244,6 @@ vuint8m1_t test_vredsum_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector,
   return vredsum_vs_u8mf4_u8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf2_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -279,7 +254,6 @@ vuint8m1_t test_vredsum_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector,
   return vredsum_vs_u8mf2_u8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u8m1_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -290,7 +264,6 @@ vuint8m1_t test_vredsum_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector,
   return vredsum_vs_u8m1_u8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u8m2_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -301,7 +274,6 @@ vuint8m1_t test_vredsum_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector,
   return vredsum_vs_u8m2_u8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u8m4_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -312,7 +284,6 @@ vuint8m1_t test_vredsum_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector,
   return vredsum_vs_u8m4_u8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u8m8_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -323,7 +294,6 @@ vuint8m1_t test_vredsum_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector,
   return vredsum_vs_u8m8_u8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u16mf4_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -334,7 +304,6 @@ vuint16m1_t test_vredsum_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector,
   return vredsum_vs_u16mf4_u16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u16mf2_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -345,7 +314,6 @@ vuint16m1_t test_vredsum_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector,
   return vredsum_vs_u16mf2_u16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u16m1_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -356,7 +324,6 @@ vuint16m1_t test_vredsum_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector,
   return vredsum_vs_u16m1_u16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u16m2_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -367,7 +334,6 @@ vuint16m1_t test_vredsum_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector,
   return vredsum_vs_u16m2_u16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u16m4_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -378,7 +344,6 @@ vuint16m1_t test_vredsum_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector,
   return vredsum_vs_u16m4_u16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u16m8_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -389,7 +354,6 @@ vuint16m1_t test_vredsum_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector,
   return vredsum_vs_u16m8_u16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -400,7 +364,6 @@ vuint32m1_t test_vredsum_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector,
   return vredsum_vs_u32mf2_u32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u32m1_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -411,7 +374,6 @@ vuint32m1_t test_vredsum_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector,
   return vredsum_vs_u32m1_u32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u32m2_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -422,7 +384,6 @@ vuint32m1_t test_vredsum_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector,
   return vredsum_vs_u32m2_u32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u32m4_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -433,7 +394,6 @@ vuint32m1_t test_vredsum_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector,
   return vredsum_vs_u32m4_u32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u32m8_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -444,7 +404,6 @@ vuint32m1_t test_vredsum_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector,
   return vredsum_vs_u32m8_u32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u64m1_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -455,7 +414,6 @@ vuint64m1_t test_vredsum_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector,
   return vredsum_vs_u64m1_u64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u64m2_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -466,7 +424,6 @@ vuint64m1_t test_vredsum_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector,
   return vredsum_vs_u64m2_u64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u64m4_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -477,7 +434,6 @@ vuint64m1_t test_vredsum_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector,
   return vredsum_vs_u64m4_u64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u64m8_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -488,7 +444,6 @@ vuint64m1_t test_vredsum_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector,
   return vredsum_vs_u64m8_u64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf8_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -500,7 +455,6 @@ vint8m1_t test_vredsum_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst,
   return vredsum_vs_i8mf8_i8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf4_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -512,7 +466,6 @@ vint8m1_t test_vredsum_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst,
   return vredsum_vs_i8mf4_i8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf2_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -524,7 +477,6 @@ vint8m1_t test_vredsum_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst,
   return vredsum_vs_i8mf2_i8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i8m1_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -536,7 +488,6 @@ vint8m1_t test_vredsum_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst,
   return vredsum_vs_i8m1_i8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i8m2_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -548,7 +499,6 @@ vint8m1_t test_vredsum_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst,
   return vredsum_vs_i8m2_i8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i8m4_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -560,7 +510,6 @@ vint8m1_t test_vredsum_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst,
   return vredsum_vs_i8m4_i8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i8m8_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -572,7 +521,6 @@ vint8m1_t test_vredsum_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst,
   return vredsum_vs_i8m8_i8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i16mf4_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -584,7 +532,6 @@ vint16m1_t test_vredsum_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst,
   return vredsum_vs_i16mf4_i16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i16mf2_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -596,7 +543,6 @@ vint16m1_t test_vredsum_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst,
   return vredsum_vs_i16mf2_i16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i16m1_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -608,7 +554,6 @@ vint16m1_t test_vredsum_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst,
   return vredsum_vs_i16m1_i16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i16m2_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -620,7 +565,6 @@ vint16m1_t test_vredsum_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst,
   return vredsum_vs_i16m2_i16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i16m4_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -632,7 +576,6 @@ vint16m1_t test_vredsum_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst,
   return vredsum_vs_i16m4_i16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i16m8_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -644,7 +587,6 @@ vint16m1_t test_vredsum_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst,
   return vredsum_vs_i16m8_i16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -656,7 +598,6 @@ vint32m1_t test_vredsum_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst,
   return vredsum_vs_i32mf2_i32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i32m1_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -668,7 +609,6 @@ vint32m1_t test_vredsum_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst,
   return vredsum_vs_i32m1_i32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i32m2_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -680,7 +620,6 @@ vint32m1_t test_vredsum_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst,
   return vredsum_vs_i32m2_i32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i32m4_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -692,7 +631,6 @@ vint32m1_t test_vredsum_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst,
   return vredsum_vs_i32m4_i32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i32m8_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -704,7 +642,6 @@ vint32m1_t test_vredsum_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst,
   return vredsum_vs_i32m8_i32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i64m1_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -716,7 +653,6 @@ vint64m1_t test_vredsum_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst,
   return vredsum_vs_i64m1_i64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i64m2_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -728,7 +664,6 @@ vint64m1_t test_vredsum_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst,
   return vredsum_vs_i64m2_i64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i64m4_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -740,7 +675,6 @@ vint64m1_t test_vredsum_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst,
   return vredsum_vs_i64m4_i64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i64m8_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -752,7 +686,6 @@ vint64m1_t test_vredsum_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst,
   return vredsum_vs_i64m8_i64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf8_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -764,7 +697,6 @@ vuint8m1_t test_vredsum_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst,
   return vredsum_vs_u8mf8_u8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf4_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -776,7 +708,6 @@ vuint8m1_t test_vredsum_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst,
   return vredsum_vs_u8mf4_u8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf2_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -788,7 +719,6 @@ vuint8m1_t test_vredsum_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst,
   return vredsum_vs_u8mf2_u8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u8m1_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -800,7 +730,6 @@ vuint8m1_t test_vredsum_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst,
   return vredsum_vs_u8m1_u8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u8m2_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -812,7 +741,6 @@ vuint8m1_t test_vredsum_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst,
   return vredsum_vs_u8m2_u8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u8m4_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -824,7 +752,6 @@ vuint8m1_t test_vredsum_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst,
   return vredsum_vs_u8m4_u8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u8m8_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -836,7 +763,6 @@ vuint8m1_t test_vredsum_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst,
   return vredsum_vs_u8m8_u8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u16mf4_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -848,7 +774,6 @@ vuint16m1_t test_vredsum_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst,
   return vredsum_vs_u16mf4_u16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u16mf2_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -860,7 +785,6 @@ vuint16m1_t test_vredsum_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst,
   return vredsum_vs_u16mf2_u16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u16m1_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -872,7 +796,6 @@ vuint16m1_t test_vredsum_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst,
   return vredsum_vs_u16m1_u16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u16m2_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -884,7 +807,6 @@ vuint16m1_t test_vredsum_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst,
   return vredsum_vs_u16m2_u16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u16m4_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -896,7 +818,6 @@ vuint16m1_t test_vredsum_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst,
   return vredsum_vs_u16m4_u16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u16m8_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -908,7 +829,6 @@ vuint16m1_t test_vredsum_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst,
   return vredsum_vs_u16m8_u16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -920,7 +840,6 @@ vuint32m1_t test_vredsum_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst,
   return vredsum_vs_u32mf2_u32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u32m1_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -932,7 +851,6 @@ vuint32m1_t test_vredsum_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst,
   return vredsum_vs_u32m1_u32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u32m2_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -944,7 +862,6 @@ vuint32m1_t test_vredsum_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst,
   return vredsum_vs_u32m2_u32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u32m4_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -956,7 +873,6 @@ vuint32m1_t test_vredsum_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst,
   return vredsum_vs_u32m4_u32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u32m8_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -968,7 +884,6 @@ vuint32m1_t test_vredsum_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst,
   return vredsum_vs_u32m8_u32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u64m1_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -980,7 +895,6 @@ vuint64m1_t test_vredsum_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst,
   return vredsum_vs_u64m1_u64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u64m2_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -992,7 +906,6 @@ vuint64m1_t test_vredsum_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst,
   return vredsum_vs_u64m2_u64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u64m4_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1004,7 +917,6 @@ vuint64m1_t test_vredsum_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst,
   return vredsum_vs_u64m4_u64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u64m8_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredxor.c
index 0ed8193d08a71..e3b2f72e5ecb4 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredxor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredxor.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf8_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vint8m1_t test_vredxor_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector,
   return vredxor_vs_i8mf8_i8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf4_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -26,7 +24,6 @@ vint8m1_t test_vredxor_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector,
   return vredxor_vs_i8mf4_i8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf2_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -37,7 +34,6 @@ vint8m1_t test_vredxor_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector,
   return vredxor_vs_i8mf2_i8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i8m1_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -48,7 +44,6 @@ vint8m1_t test_vredxor_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector,
   return vredxor_vs_i8m1_i8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i8m2_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -59,7 +54,6 @@ vint8m1_t test_vredxor_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector,
   return vredxor_vs_i8m2_i8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i8m4_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -70,7 +64,6 @@ vint8m1_t test_vredxor_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector,
   return vredxor_vs_i8m4_i8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i8m8_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -81,7 +74,6 @@ vint8m1_t test_vredxor_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector,
   return vredxor_vs_i8m8_i8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i16mf4_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -92,7 +84,6 @@ vint16m1_t test_vredxor_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector,
   return vredxor_vs_i16mf4_i16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i16mf2_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -103,7 +94,6 @@ vint16m1_t test_vredxor_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector,
   return vredxor_vs_i16mf2_i16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i16m1_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -114,7 +104,6 @@ vint16m1_t test_vredxor_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector,
   return vredxor_vs_i16m1_i16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i16m2_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -125,7 +114,6 @@ vint16m1_t test_vredxor_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector,
   return vredxor_vs_i16m2_i16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i16m4_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -136,7 +124,6 @@ vint16m1_t test_vredxor_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector,
   return vredxor_vs_i16m4_i16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i16m8_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -147,7 +134,6 @@ vint16m1_t test_vredxor_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector,
   return vredxor_vs_i16m8_i16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -158,7 +144,6 @@ vint32m1_t test_vredxor_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector,
   return vredxor_vs_i32mf2_i32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i32m1_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -169,7 +154,6 @@ vint32m1_t test_vredxor_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector,
   return vredxor_vs_i32m1_i32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i32m2_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -180,7 +164,6 @@ vint32m1_t test_vredxor_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector,
   return vredxor_vs_i32m2_i32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i32m4_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -191,7 +174,6 @@ vint32m1_t test_vredxor_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector,
   return vredxor_vs_i32m4_i32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i32m8_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -202,7 +184,6 @@ vint32m1_t test_vredxor_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector,
   return vredxor_vs_i32m8_i32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i64m1_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -213,7 +194,6 @@ vint64m1_t test_vredxor_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector,
   return vredxor_vs_i64m1_i64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i64m2_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -224,7 +204,6 @@ vint64m1_t test_vredxor_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector,
   return vredxor_vs_i64m2_i64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i64m4_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -235,7 +214,6 @@ vint64m1_t test_vredxor_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector,
   return vredxor_vs_i64m4_i64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i64m8_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -246,7 +224,6 @@ vint64m1_t test_vredxor_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector,
   return vredxor_vs_i64m8_i64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf8_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -257,7 +234,6 @@ vuint8m1_t test_vredxor_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector,
   return vredxor_vs_u8mf8_u8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf4_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -268,7 +244,6 @@ vuint8m1_t test_vredxor_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector,
   return vredxor_vs_u8mf4_u8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf2_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -279,7 +254,6 @@ vuint8m1_t test_vredxor_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector,
   return vredxor_vs_u8mf2_u8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8m1_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -290,7 +264,6 @@ vuint8m1_t test_vredxor_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector,
   return vredxor_vs_u8m1_u8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8m2_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -301,7 +274,6 @@ vuint8m1_t test_vredxor_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector,
   return vredxor_vs_u8m2_u8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8m4_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -312,7 +284,6 @@ vuint8m1_t test_vredxor_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector,
   return vredxor_vs_u8m4_u8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8m8_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -323,7 +294,6 @@ vuint8m1_t test_vredxor_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector,
   return vredxor_vs_u8m8_u8m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u16mf4_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -334,7 +304,6 @@ vuint16m1_t test_vredxor_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector,
   return vredxor_vs_u16mf4_u16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u16mf2_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -345,7 +314,6 @@ vuint16m1_t test_vredxor_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector,
   return vredxor_vs_u16mf2_u16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u16m1_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -356,7 +324,6 @@ vuint16m1_t test_vredxor_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector,
   return vredxor_vs_u16m1_u16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u16m2_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -367,7 +334,6 @@ vuint16m1_t test_vredxor_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector,
   return vredxor_vs_u16m2_u16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u16m4_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -378,7 +344,6 @@ vuint16m1_t test_vredxor_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector,
   return vredxor_vs_u16m4_u16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u16m8_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -389,7 +354,6 @@ vuint16m1_t test_vredxor_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector,
   return vredxor_vs_u16m8_u16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -400,7 +364,6 @@ vuint32m1_t test_vredxor_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector,
   return vredxor_vs_u32mf2_u32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u32m1_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -411,7 +374,6 @@ vuint32m1_t test_vredxor_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector,
   return vredxor_vs_u32m1_u32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u32m2_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -422,7 +384,6 @@ vuint32m1_t test_vredxor_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector,
   return vredxor_vs_u32m2_u32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u32m4_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -433,7 +394,6 @@ vuint32m1_t test_vredxor_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector,
   return vredxor_vs_u32m4_u32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u32m8_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -444,7 +404,6 @@ vuint32m1_t test_vredxor_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector,
   return vredxor_vs_u32m8_u32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u64m1_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -455,7 +414,6 @@ vuint64m1_t test_vredxor_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector,
   return vredxor_vs_u64m1_u64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u64m2_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -466,7 +424,6 @@ vuint64m1_t test_vredxor_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector,
   return vredxor_vs_u64m2_u64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u64m4_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -477,7 +434,6 @@ vuint64m1_t test_vredxor_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector,
   return vredxor_vs_u64m4_u64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u64m8_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -488,7 +444,6 @@ vuint64m1_t test_vredxor_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector,
   return vredxor_vs_u64m8_u64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf8_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -500,7 +455,6 @@ vint8m1_t test_vredxor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst,
   return vredxor_vs_i8mf8_i8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf4_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -512,7 +466,6 @@ vint8m1_t test_vredxor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst,
   return vredxor_vs_i8mf4_i8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf2_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -524,7 +477,6 @@ vint8m1_t test_vredxor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst,
   return vredxor_vs_i8mf2_i8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i8m1_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -536,7 +488,6 @@ vint8m1_t test_vredxor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst,
   return vredxor_vs_i8m1_i8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i8m2_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -548,7 +499,6 @@ vint8m1_t test_vredxor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst,
   return vredxor_vs_i8m2_i8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i8m4_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -560,7 +510,6 @@ vint8m1_t test_vredxor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst,
   return vredxor_vs_i8m4_i8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i8m8_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -572,7 +521,6 @@ vint8m1_t test_vredxor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst,
   return vredxor_vs_i8m8_i8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i16mf4_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -584,7 +532,6 @@ vint16m1_t test_vredxor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst,
   return vredxor_vs_i16mf4_i16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i16mf2_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -596,7 +543,6 @@ vint16m1_t test_vredxor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst,
   return vredxor_vs_i16mf2_i16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i16m1_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -608,7 +554,6 @@ vint16m1_t test_vredxor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst,
   return vredxor_vs_i16m1_i16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i16m2_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -620,7 +565,6 @@ vint16m1_t test_vredxor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst,
   return vredxor_vs_i16m2_i16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i16m4_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -632,7 +576,6 @@ vint16m1_t test_vredxor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst,
   return vredxor_vs_i16m4_i16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i16m8_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -644,7 +587,6 @@ vint16m1_t test_vredxor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst,
   return vredxor_vs_i16m8_i16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -656,7 +598,6 @@ vint32m1_t test_vredxor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst,
   return vredxor_vs_i32mf2_i32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i32m1_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -668,7 +609,6 @@ vint32m1_t test_vredxor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst,
   return vredxor_vs_i32m1_i32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i32m2_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -680,7 +620,6 @@ vint32m1_t test_vredxor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst,
   return vredxor_vs_i32m2_i32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i32m4_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -692,7 +631,6 @@ vint32m1_t test_vredxor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst,
   return vredxor_vs_i32m4_i32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i32m8_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -704,7 +642,6 @@ vint32m1_t test_vredxor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst,
   return vredxor_vs_i32m8_i32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i64m1_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -716,7 +653,6 @@ vint64m1_t test_vredxor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst,
   return vredxor_vs_i64m1_i64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i64m2_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -728,7 +664,6 @@ vint64m1_t test_vredxor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst,
   return vredxor_vs_i64m2_i64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i64m4_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -740,7 +675,6 @@ vint64m1_t test_vredxor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst,
   return vredxor_vs_i64m4_i64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i64m8_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -752,7 +686,6 @@ vint64m1_t test_vredxor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst,
   return vredxor_vs_i64m8_i64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf8_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -764,7 +697,6 @@ vuint8m1_t test_vredxor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst,
   return vredxor_vs_u8mf8_u8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf4_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -776,7 +708,6 @@ vuint8m1_t test_vredxor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst,
   return vredxor_vs_u8mf4_u8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf2_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -788,7 +719,6 @@ vuint8m1_t test_vredxor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst,
   return vredxor_vs_u8mf2_u8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8m1_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -800,7 +730,6 @@ vuint8m1_t test_vredxor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst,
   return vredxor_vs_u8m1_u8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8m2_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -812,7 +741,6 @@ vuint8m1_t test_vredxor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst,
   return vredxor_vs_u8m2_u8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8m4_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -824,7 +752,6 @@ vuint8m1_t test_vredxor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst,
   return vredxor_vs_u8m4_u8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8m8_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -836,7 +763,6 @@ vuint8m1_t test_vredxor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst,
   return vredxor_vs_u8m8_u8m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u16mf4_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -848,7 +774,6 @@ vuint16m1_t test_vredxor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst,
   return vredxor_vs_u16mf4_u16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u16mf2_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -860,7 +785,6 @@ vuint16m1_t test_vredxor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst,
   return vredxor_vs_u16mf2_u16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u16m1_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -872,7 +796,6 @@ vuint16m1_t test_vredxor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst,
   return vredxor_vs_u16m1_u16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u16m2_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -884,7 +807,6 @@ vuint16m1_t test_vredxor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst,
   return vredxor_vs_u16m2_u16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u16m4_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -896,7 +818,6 @@ vuint16m1_t test_vredxor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst,
   return vredxor_vs_u16m4_u16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u16m8_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -908,7 +829,6 @@ vuint16m1_t test_vredxor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst,
   return vredxor_vs_u16m8_u16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -920,7 +840,6 @@ vuint32m1_t test_vredxor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst,
   return vredxor_vs_u32mf2_u32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u32m1_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -932,7 +851,6 @@ vuint32m1_t test_vredxor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst,
   return vredxor_vs_u32m1_u32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u32m2_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -944,7 +862,6 @@ vuint32m1_t test_vredxor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst,
   return vredxor_vs_u32m2_u32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u32m4_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -956,7 +873,6 @@ vuint32m1_t test_vredxor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst,
   return vredxor_vs_u32m4_u32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u32m8_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -968,7 +884,6 @@ vuint32m1_t test_vredxor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst,
   return vredxor_vs_u32m8_u32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u64m1_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -980,7 +895,6 @@ vuint64m1_t test_vredxor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst,
   return vredxor_vs_u64m1_u64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u64m2_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -992,7 +906,6 @@ vuint64m1_t test_vredxor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst,
   return vredxor_vs_u64m2_u64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u64m4_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1004,7 +917,6 @@ vuint64m1_t test_vredxor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst,
   return vredxor_vs_u64m4_u64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u64m8_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vreinterpret.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vreinterpret.c
index 59b5451ae7651..1e5846a7b61d9 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vreinterpret.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vreinterpret.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i8mf8_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[SRC:%.*]]
@@ -14,7 +13,6 @@ vuint8mf8_t test_vreinterpret_v_i8mf8_u8mf8(vint8mf8_t src) {
   return vreinterpret_v_i8mf8_u8mf8(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i8mf4_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[SRC:%.*]]
@@ -23,7 +21,6 @@ vuint8mf4_t test_vreinterpret_v_i8mf4_u8mf4(vint8mf4_t src) {
   return vreinterpret_v_i8mf4_u8mf4(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i8mf2_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[SRC:%.*]]
@@ -32,7 +29,6 @@ vuint8mf2_t test_vreinterpret_v_i8mf2_u8mf2(vint8mf2_t src) {
   return vreinterpret_v_i8mf2_u8mf2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i8m1_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[SRC:%.*]]
@@ -41,7 +37,6 @@ vuint8m1_t test_vreinterpret_v_i8m1_u8m1(vint8m1_t src) {
   return vreinterpret_v_i8m1_u8m1(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i8m2_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[SRC:%.*]]
@@ -50,7 +45,6 @@ vuint8m2_t test_vreinterpret_v_i8m2_u8m2(vint8m2_t src) {
   return vreinterpret_v_i8m2_u8m2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i8m4_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[SRC:%.*]]
@@ -59,7 +53,6 @@ vuint8m4_t test_vreinterpret_v_i8m4_u8m4(vint8m4_t src) {
   return vreinterpret_v_i8m4_u8m4(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i8m8_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[SRC:%.*]]
@@ -68,7 +61,6 @@ vuint8m8_t test_vreinterpret_v_i8m8_u8m8(vint8m8_t src) {
   return vreinterpret_v_i8m8_u8m8(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u8mf8_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[SRC:%.*]]
@@ -77,7 +69,6 @@ vint8mf8_t test_vreinterpret_v_u8mf8_i8mf8(vuint8mf8_t src) {
   return vreinterpret_v_u8mf8_i8mf8(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u8mf4_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[SRC:%.*]]
@@ -86,7 +77,6 @@ vint8mf4_t test_vreinterpret_v_u8mf4_i8mf4(vuint8mf4_t src) {
   return vreinterpret_v_u8mf4_i8mf4(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u8mf2_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[SRC:%.*]]
@@ -95,7 +85,6 @@ vint8mf2_t test_vreinterpret_v_u8mf2_i8mf2(vuint8mf2_t src) {
   return vreinterpret_v_u8mf2_i8mf2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u8m1_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[SRC:%.*]]
@@ -104,7 +93,6 @@ vint8m1_t test_vreinterpret_v_u8m1_i8m1(vuint8m1_t src) {
   return vreinterpret_v_u8m1_i8m1(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u8m2_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[SRC:%.*]]
@@ -113,7 +101,6 @@ vint8m2_t test_vreinterpret_v_u8m2_i8m2(vuint8m2_t src) {
   return vreinterpret_v_u8m2_i8m2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u8m4_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[SRC:%.*]]
@@ -122,7 +109,6 @@ vint8m4_t test_vreinterpret_v_u8m4_i8m4(vuint8m4_t src) {
   return vreinterpret_v_u8m4_i8m4(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u8m8_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[SRC:%.*]]
@@ -131,7 +117,6 @@ vint8m8_t test_vreinterpret_v_u8m8_i8m8(vuint8m8_t src) {
   return vreinterpret_v_u8m8_i8m8(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i16mf4_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[SRC:%.*]]
@@ -140,7 +125,6 @@ vuint16mf4_t test_vreinterpret_v_i16mf4_u16mf4(vint16mf4_t src) {
   return vreinterpret_v_i16mf4_u16mf4(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i16mf2_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[SRC:%.*]]
@@ -149,7 +133,6 @@ vuint16mf2_t test_vreinterpret_v_i16mf2_u16mf2(vint16mf2_t src) {
   return vreinterpret_v_i16mf2_u16mf2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i16m1_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[SRC:%.*]]
@@ -158,7 +141,6 @@ vuint16m1_t test_vreinterpret_v_i16m1_u16m1(vint16m1_t src) {
   return vreinterpret_v_i16m1_u16m1(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i16m2_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[SRC:%.*]]
@@ -167,7 +149,6 @@ vuint16m2_t test_vreinterpret_v_i16m2_u16m2(vint16m2_t src) {
   return vreinterpret_v_i16m2_u16m2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i16m4_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[SRC:%.*]]
@@ -176,7 +157,6 @@ vuint16m4_t test_vreinterpret_v_i16m4_u16m4(vint16m4_t src) {
   return vreinterpret_v_i16m4_u16m4(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i16m8_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[SRC:%.*]]
@@ -185,7 +165,6 @@ vuint16m8_t test_vreinterpret_v_i16m8_u16m8(vint16m8_t src) {
   return vreinterpret_v_i16m8_u16m8(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u16mf4_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[SRC:%.*]]
@@ -194,7 +173,6 @@ vint16mf4_t test_vreinterpret_v_u16mf4_i16mf4(vuint16mf4_t src) {
   return vreinterpret_v_u16mf4_i16mf4(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u16mf2_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[SRC:%.*]]
@@ -203,7 +181,6 @@ vint16mf2_t test_vreinterpret_v_u16mf2_i16mf2(vuint16mf2_t src) {
   return vreinterpret_v_u16mf2_i16mf2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u16m1_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[SRC:%.*]]
@@ -212,7 +189,6 @@ vint16m1_t test_vreinterpret_v_u16m1_i16m1(vuint16m1_t src) {
   return vreinterpret_v_u16m1_i16m1(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u16m2_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[SRC:%.*]]
@@ -221,7 +197,6 @@ vint16m2_t test_vreinterpret_v_u16m2_i16m2(vuint16m2_t src) {
   return vreinterpret_v_u16m2_i16m2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u16m4_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[SRC:%.*]]
@@ -230,7 +205,6 @@ vint16m4_t test_vreinterpret_v_u16m4_i16m4(vuint16m4_t src) {
   return vreinterpret_v_u16m4_i16m4(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u16m8_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[SRC:%.*]]
@@ -239,7 +213,6 @@ vint16m8_t test_vreinterpret_v_u16m8_i16m8(vuint16m8_t src) {
   return vreinterpret_v_u16m8_i16m8(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i32mf2_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[SRC:%.*]]
@@ -248,7 +221,6 @@ vuint32mf2_t test_vreinterpret_v_i32mf2_u32mf2(vint32mf2_t src) {
   return vreinterpret_v_i32mf2_u32mf2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i32m1_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[SRC:%.*]]
@@ -257,7 +229,6 @@ vuint32m1_t test_vreinterpret_v_i32m1_u32m1(vint32m1_t src) {
   return vreinterpret_v_i32m1_u32m1(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i32m2_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[SRC:%.*]]
@@ -266,7 +237,6 @@ vuint32m2_t test_vreinterpret_v_i32m2_u32m2(vint32m2_t src) {
   return vreinterpret_v_i32m2_u32m2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i32m4_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[SRC:%.*]]
@@ -275,7 +245,6 @@ vuint32m4_t test_vreinterpret_v_i32m4_u32m4(vint32m4_t src) {
   return vreinterpret_v_i32m4_u32m4(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i32m8_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[SRC:%.*]]
@@ -284,7 +253,6 @@ vuint32m8_t test_vreinterpret_v_i32m8_u32m8(vint32m8_t src) {
   return vreinterpret_v_i32m8_u32m8(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u32mf2_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[SRC:%.*]]
@@ -293,7 +261,6 @@ vint32mf2_t test_vreinterpret_v_u32mf2_i32mf2(vuint32mf2_t src) {
   return vreinterpret_v_u32mf2_i32mf2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u32m1_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[SRC:%.*]]
@@ -302,7 +269,6 @@ vint32m1_t test_vreinterpret_v_u32m1_i32m1(vuint32m1_t src) {
   return vreinterpret_v_u32m1_i32m1(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u32m2_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[SRC:%.*]]
@@ -311,7 +277,6 @@ vint32m2_t test_vreinterpret_v_u32m2_i32m2(vuint32m2_t src) {
   return vreinterpret_v_u32m2_i32m2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u32m4_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[SRC:%.*]]
@@ -320,7 +285,6 @@ vint32m4_t test_vreinterpret_v_u32m4_i32m4(vuint32m4_t src) {
   return vreinterpret_v_u32m4_i32m4(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u32m8_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[SRC:%.*]]
@@ -329,7 +293,6 @@ vint32m8_t test_vreinterpret_v_u32m8_i32m8(vuint32m8_t src) {
   return vreinterpret_v_u32m8_i32m8(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_f32mf2_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x float> [[SRC:%.*]] to <vscale x 1 x i32>
@@ -339,7 +302,6 @@ vint32mf2_t test_vreinterpret_v_f32mf2_i32mf2(vfloat32mf2_t src) {
   return vreinterpret_v_f32mf2_i32mf2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_f32m1_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x float> [[SRC:%.*]] to <vscale x 2 x i32>
@@ -349,7 +311,6 @@ vint32m1_t test_vreinterpret_v_f32m1_i32m1(vfloat32m1_t src) {
   return vreinterpret_v_f32m1_i32m1(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_f32m2_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x float> [[SRC:%.*]] to <vscale x 4 x i32>
@@ -359,7 +320,6 @@ vint32m2_t test_vreinterpret_v_f32m2_i32m2(vfloat32m2_t src) {
   return vreinterpret_v_f32m2_i32m2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_f32m4_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x float> [[SRC:%.*]] to <vscale x 8 x i32>
@@ -369,7 +329,6 @@ vint32m4_t test_vreinterpret_v_f32m4_i32m4(vfloat32m4_t src) {
   return vreinterpret_v_f32m4_i32m4(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_f32m8_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x float> [[SRC:%.*]] to <vscale x 16 x i32>
@@ -379,7 +338,6 @@ vint32m8_t test_vreinterpret_v_f32m8_i32m8(vfloat32m8_t src) {
   return vreinterpret_v_f32m8_i32m8(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_f32mf2_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x float> [[SRC:%.*]] to <vscale x 1 x i32>
@@ -389,7 +347,6 @@ vuint32mf2_t test_vreinterpret_v_f32mf2_u32mf2(vfloat32mf2_t src) {
   return vreinterpret_v_f32mf2_u32mf2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_f32m1_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x float> [[SRC:%.*]] to <vscale x 2 x i32>
@@ -399,7 +356,6 @@ vuint32m1_t test_vreinterpret_v_f32m1_u32m1(vfloat32m1_t src) {
   return vreinterpret_v_f32m1_u32m1(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_f32m2_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x float> [[SRC:%.*]] to <vscale x 4 x i32>
@@ -409,7 +365,6 @@ vuint32m2_t test_vreinterpret_v_f32m2_u32m2(vfloat32m2_t src) {
   return vreinterpret_v_f32m2_u32m2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_f32m4_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x float> [[SRC:%.*]] to <vscale x 8 x i32>
@@ -419,7 +374,6 @@ vuint32m4_t test_vreinterpret_v_f32m4_u32m4(vfloat32m4_t src) {
   return vreinterpret_v_f32m4_u32m4(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_f32m8_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x float> [[SRC:%.*]] to <vscale x 16 x i32>
@@ -429,7 +383,6 @@ vuint32m8_t test_vreinterpret_v_f32m8_u32m8(vfloat32m8_t src) {
   return vreinterpret_v_f32m8_u32m8(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i32mf2_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i32> [[SRC:%.*]] to <vscale x 1 x float>
@@ -439,7 +392,6 @@ vfloat32mf2_t test_vreinterpret_v_i32mf2_f32mf2(vint32mf2_t src) {
   return vreinterpret_v_i32mf2_f32mf2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i32m1_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i32> [[SRC:%.*]] to <vscale x 2 x float>
@@ -449,7 +401,6 @@ vfloat32m1_t test_vreinterpret_v_i32m1_f32m1(vint32m1_t src) {
   return vreinterpret_v_i32m1_f32m1(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i32m2_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i32> [[SRC:%.*]] to <vscale x 4 x float>
@@ -459,7 +410,6 @@ vfloat32m2_t test_vreinterpret_v_i32m2_f32m2(vint32m2_t src) {
   return vreinterpret_v_i32m2_f32m2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i32m4_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i32> [[SRC:%.*]] to <vscale x 8 x float>
@@ -469,7 +419,6 @@ vfloat32m4_t test_vreinterpret_v_i32m4_f32m4(vint32m4_t src) {
   return vreinterpret_v_i32m4_f32m4(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i32m8_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i32> [[SRC:%.*]] to <vscale x 16 x float>
@@ -479,7 +428,6 @@ vfloat32m8_t test_vreinterpret_v_i32m8_f32m8(vint32m8_t src) {
   return vreinterpret_v_i32m8_f32m8(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u32mf2_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i32> [[SRC:%.*]] to <vscale x 1 x float>
@@ -489,7 +437,6 @@ vfloat32mf2_t test_vreinterpret_v_u32mf2_f32mf2(vuint32mf2_t src) {
   return vreinterpret_v_u32mf2_f32mf2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u32m1_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i32> [[SRC:%.*]] to <vscale x 2 x float>
@@ -499,7 +446,6 @@ vfloat32m1_t test_vreinterpret_v_u32m1_f32m1(vuint32m1_t src) {
   return vreinterpret_v_u32m1_f32m1(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u32m2_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i32> [[SRC:%.*]] to <vscale x 4 x float>
@@ -509,7 +455,6 @@ vfloat32m2_t test_vreinterpret_v_u32m2_f32m2(vuint32m2_t src) {
   return vreinterpret_v_u32m2_f32m2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u32m4_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i32> [[SRC:%.*]] to <vscale x 8 x float>
@@ -519,7 +464,6 @@ vfloat32m4_t test_vreinterpret_v_u32m4_f32m4(vuint32m4_t src) {
   return vreinterpret_v_u32m4_f32m4(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u32m8_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i32> [[SRC:%.*]] to <vscale x 16 x float>
@@ -529,7 +473,6 @@ vfloat32m8_t test_vreinterpret_v_u32m8_f32m8(vuint32m8_t src) {
   return vreinterpret_v_u32m8_f32m8(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i64m1_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[SRC:%.*]]
@@ -538,7 +481,6 @@ vuint64m1_t test_vreinterpret_v_i64m1_u64m1(vint64m1_t src) {
   return vreinterpret_v_i64m1_u64m1(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i64m2_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[SRC:%.*]]
@@ -547,7 +489,6 @@ vuint64m2_t test_vreinterpret_v_i64m2_u64m2(vint64m2_t src) {
   return vreinterpret_v_i64m2_u64m2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i64m4_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[SRC:%.*]]
@@ -556,7 +497,6 @@ vuint64m4_t test_vreinterpret_v_i64m4_u64m4(vint64m4_t src) {
   return vreinterpret_v_i64m4_u64m4(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i64m8_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[SRC:%.*]]
@@ -565,7 +505,6 @@ vuint64m8_t test_vreinterpret_v_i64m8_u64m8(vint64m8_t src) {
   return vreinterpret_v_i64m8_u64m8(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u64m1_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[SRC:%.*]]
@@ -574,7 +513,6 @@ vint64m1_t test_vreinterpret_v_u64m1_i64m1(vuint64m1_t src) {
   return vreinterpret_v_u64m1_i64m1(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u64m2_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[SRC:%.*]]
@@ -583,7 +521,6 @@ vint64m2_t test_vreinterpret_v_u64m2_i64m2(vuint64m2_t src) {
   return vreinterpret_v_u64m2_i64m2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u64m4_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[SRC:%.*]]
@@ -592,7 +529,6 @@ vint64m4_t test_vreinterpret_v_u64m4_i64m4(vuint64m4_t src) {
   return vreinterpret_v_u64m4_i64m4(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u64m8_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[SRC:%.*]]
@@ -601,7 +537,6 @@ vint64m8_t test_vreinterpret_v_u64m8_i64m8(vuint64m8_t src) {
   return vreinterpret_v_u64m8_i64m8(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_f64m1_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x double> [[SRC:%.*]] to <vscale x 1 x i64>
@@ -611,7 +546,6 @@ vint64m1_t test_vreinterpret_v_f64m1_i64m1(vfloat64m1_t src) {
   return vreinterpret_v_f64m1_i64m1(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_f64m2_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x double> [[SRC:%.*]] to <vscale x 2 x i64>
@@ -621,7 +555,6 @@ vint64m2_t test_vreinterpret_v_f64m2_i64m2(vfloat64m2_t src) {
   return vreinterpret_v_f64m2_i64m2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_f64m4_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x double> [[SRC:%.*]] to <vscale x 4 x i64>
@@ -631,7 +564,6 @@ vint64m4_t test_vreinterpret_v_f64m4_i64m4(vfloat64m4_t src) {
   return vreinterpret_v_f64m4_i64m4(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_f64m8_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x double> [[SRC:%.*]] to <vscale x 8 x i64>
@@ -641,7 +573,6 @@ vint64m8_t test_vreinterpret_v_f64m8_i64m8(vfloat64m8_t src) {
   return vreinterpret_v_f64m8_i64m8(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_f64m1_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x double> [[SRC:%.*]] to <vscale x 1 x i64>
@@ -651,7 +582,6 @@ vuint64m1_t test_vreinterpret_v_f64m1_u64m1(vfloat64m1_t src) {
   return vreinterpret_v_f64m1_u64m1(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_f64m2_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x double> [[SRC:%.*]] to <vscale x 2 x i64>
@@ -661,7 +591,6 @@ vuint64m2_t test_vreinterpret_v_f64m2_u64m2(vfloat64m2_t src) {
   return vreinterpret_v_f64m2_u64m2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_f64m4_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x double> [[SRC:%.*]] to <vscale x 4 x i64>
@@ -671,7 +600,6 @@ vuint64m4_t test_vreinterpret_v_f64m4_u64m4(vfloat64m4_t src) {
   return vreinterpret_v_f64m4_u64m4(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_f64m8_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x double> [[SRC:%.*]] to <vscale x 8 x i64>
@@ -681,7 +609,6 @@ vuint64m8_t test_vreinterpret_v_f64m8_u64m8(vfloat64m8_t src) {
   return vreinterpret_v_f64m8_u64m8(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i64m1_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i64> [[SRC:%.*]] to <vscale x 1 x double>
@@ -691,7 +618,6 @@ vfloat64m1_t test_vreinterpret_v_i64m1_f64m1(vint64m1_t src) {
   return vreinterpret_v_i64m1_f64m1(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i64m2_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i64> [[SRC:%.*]] to <vscale x 2 x double>
@@ -701,7 +627,6 @@ vfloat64m2_t test_vreinterpret_v_i64m2_f64m2(vint64m2_t src) {
   return vreinterpret_v_i64m2_f64m2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i64m4_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i64> [[SRC:%.*]] to <vscale x 4 x double>
@@ -711,7 +636,6 @@ vfloat64m4_t test_vreinterpret_v_i64m4_f64m4(vint64m4_t src) {
   return vreinterpret_v_i64m4_f64m4(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i64m8_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i64> [[SRC:%.*]] to <vscale x 8 x double>
@@ -721,7 +645,6 @@ vfloat64m8_t test_vreinterpret_v_i64m8_f64m8(vint64m8_t src) {
   return vreinterpret_v_i64m8_f64m8(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u64m1_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i64> [[SRC:%.*]] to <vscale x 1 x double>
@@ -731,7 +654,6 @@ vfloat64m1_t test_vreinterpret_v_u64m1_f64m1(vuint64m1_t src) {
   return vreinterpret_v_u64m1_f64m1(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u64m2_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i64> [[SRC:%.*]] to <vscale x 2 x double>
@@ -741,7 +663,6 @@ vfloat64m2_t test_vreinterpret_v_u64m2_f64m2(vuint64m2_t src) {
   return vreinterpret_v_u64m2_f64m2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u64m4_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i64> [[SRC:%.*]] to <vscale x 4 x double>
@@ -751,7 +672,6 @@ vfloat64m4_t test_vreinterpret_v_u64m4_f64m4(vuint64m4_t src) {
   return vreinterpret_v_u64m4_f64m4(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u64m8_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i64> [[SRC:%.*]] to <vscale x 8 x double>
@@ -761,7 +681,6 @@ vfloat64m8_t test_vreinterpret_v_u64m8_f64m8(vuint64m8_t src) {
   return vreinterpret_v_u64m8_f64m8(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i8mf4_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i8> [[SRC:%.*]] to <vscale x 1 x i16>
@@ -771,7 +690,6 @@ vint16mf4_t test_vreinterpret_v_i8mf4_i16mf4(vint8mf4_t src) {
   return vreinterpret_v_i8mf4_i16mf4(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i8mf2_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i8> [[SRC:%.*]] to <vscale x 2 x i16>
@@ -781,7 +699,6 @@ vint16mf2_t test_vreinterpret_v_i8mf2_i16mf2(vint8mf2_t src) {
   return vreinterpret_v_i8mf2_i16mf2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i8m1_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i8> [[SRC:%.*]] to <vscale x 4 x i16>
@@ -791,7 +708,6 @@ vint16m1_t test_vreinterpret_v_i8m1_i16m1(vint8m1_t src) {
   return vreinterpret_v_i8m1_i16m1(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i8m2_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i8> [[SRC:%.*]] to <vscale x 8 x i16>
@@ -801,7 +717,6 @@ vint16m2_t test_vreinterpret_v_i8m2_i16m2(vint8m2_t src) {
   return vreinterpret_v_i8m2_i16m2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i8m4_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 32 x i8> [[SRC:%.*]] to <vscale x 16 x i16>
@@ -811,7 +726,6 @@ vint16m4_t test_vreinterpret_v_i8m4_i16m4(vint8m4_t src) {
   return vreinterpret_v_i8m4_i16m4(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i8m8_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 64 x i8> [[SRC:%.*]] to <vscale x 32 x i16>
@@ -821,7 +735,6 @@ vint16m8_t test_vreinterpret_v_i8m8_i16m8(vint8m8_t src) {
   return vreinterpret_v_i8m8_i16m8(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u8mf4_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i8> [[SRC:%.*]] to <vscale x 1 x i16>
@@ -831,7 +744,6 @@ vuint16mf4_t test_vreinterpret_v_u8mf4_u16mf4(vuint8mf4_t src) {
   return vreinterpret_v_u8mf4_u16mf4(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u8mf2_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i8> [[SRC:%.*]] to <vscale x 2 x i16>
@@ -841,7 +753,6 @@ vuint16mf2_t test_vreinterpret_v_u8mf2_u16mf2(vuint8mf2_t src) {
   return vreinterpret_v_u8mf2_u16mf2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u8m1_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i8> [[SRC:%.*]] to <vscale x 4 x i16>
@@ -851,7 +762,6 @@ vuint16m1_t test_vreinterpret_v_u8m1_u16m1(vuint8m1_t src) {
   return vreinterpret_v_u8m1_u16m1(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u8m2_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i8> [[SRC:%.*]] to <vscale x 8 x i16>
@@ -861,7 +771,6 @@ vuint16m2_t test_vreinterpret_v_u8m2_u16m2(vuint8m2_t src) {
   return vreinterpret_v_u8m2_u16m2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u8m4_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 32 x i8> [[SRC:%.*]] to <vscale x 16 x i16>
@@ -871,7 +780,6 @@ vuint16m4_t test_vreinterpret_v_u8m4_u16m4(vuint8m4_t src) {
   return vreinterpret_v_u8m4_u16m4(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u8m8_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 64 x i8> [[SRC:%.*]] to <vscale x 32 x i16>
@@ -881,7 +789,6 @@ vuint16m8_t test_vreinterpret_v_u8m8_u16m8(vuint8m8_t src) {
   return vreinterpret_v_u8m8_u16m8(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i8mf2_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i8> [[SRC:%.*]] to <vscale x 1 x i32>
@@ -891,7 +798,6 @@ vint32mf2_t test_vreinterpret_v_i8mf2_i32mf2(vint8mf2_t src) {
   return vreinterpret_v_i8mf2_i32mf2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i8m1_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i8> [[SRC:%.*]] to <vscale x 2 x i32>
@@ -901,7 +807,6 @@ vint32m1_t test_vreinterpret_v_i8m1_i32m1(vint8m1_t src) {
   return vreinterpret_v_i8m1_i32m1(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i8m2_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i8> [[SRC:%.*]] to <vscale x 4 x i32>
@@ -911,7 +816,6 @@ vint32m2_t test_vreinterpret_v_i8m2_i32m2(vint8m2_t src) {
   return vreinterpret_v_i8m2_i32m2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i8m4_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 32 x i8> [[SRC:%.*]] to <vscale x 8 x i32>
@@ -921,7 +825,6 @@ vint32m4_t test_vreinterpret_v_i8m4_i32m4(vint8m4_t src) {
   return vreinterpret_v_i8m4_i32m4(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i8m8_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 64 x i8> [[SRC:%.*]] to <vscale x 16 x i32>
@@ -931,7 +834,6 @@ vint32m8_t test_vreinterpret_v_i8m8_i32m8(vint8m8_t src) {
   return vreinterpret_v_i8m8_i32m8(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u8mf2_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i8> [[SRC:%.*]] to <vscale x 1 x i32>
@@ -941,7 +843,6 @@ vuint32mf2_t test_vreinterpret_v_u8mf2_u32mf2(vuint8mf2_t src) {
   return vreinterpret_v_u8mf2_u32mf2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u8m1_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i8> [[SRC:%.*]] to <vscale x 2 x i32>
@@ -951,7 +852,6 @@ vuint32m1_t test_vreinterpret_v_u8m1_u32m1(vuint8m1_t src) {
   return vreinterpret_v_u8m1_u32m1(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u8m2_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i8> [[SRC:%.*]] to <vscale x 4 x i32>
@@ -961,7 +861,6 @@ vuint32m2_t test_vreinterpret_v_u8m2_u32m2(vuint8m2_t src) {
   return vreinterpret_v_u8m2_u32m2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u8m4_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 32 x i8> [[SRC:%.*]] to <vscale x 8 x i32>
@@ -971,7 +870,6 @@ vuint32m4_t test_vreinterpret_v_u8m4_u32m4(vuint8m4_t src) {
   return vreinterpret_v_u8m4_u32m4(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u8m8_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 64 x i8> [[SRC:%.*]] to <vscale x 16 x i32>
@@ -981,7 +879,6 @@ vuint32m8_t test_vreinterpret_v_u8m8_u32m8(vuint8m8_t src) {
   return vreinterpret_v_u8m8_u32m8(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i8m1_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i8> [[SRC:%.*]] to <vscale x 1 x i64>
@@ -991,7 +888,6 @@ vint64m1_t test_vreinterpret_v_i8m1_i64m1(vint8m1_t src) {
   return vreinterpret_v_i8m1_i64m1(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i8m2_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i8> [[SRC:%.*]] to <vscale x 2 x i64>
@@ -1001,7 +897,6 @@ vint64m2_t test_vreinterpret_v_i8m2_i64m2(vint8m2_t src) {
   return vreinterpret_v_i8m2_i64m2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i8m4_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 32 x i8> [[SRC:%.*]] to <vscale x 4 x i64>
@@ -1011,7 +906,6 @@ vint64m4_t test_vreinterpret_v_i8m4_i64m4(vint8m4_t src) {
   return vreinterpret_v_i8m4_i64m4(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i8m8_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 64 x i8> [[SRC:%.*]] to <vscale x 8 x i64>
@@ -1021,7 +915,6 @@ vint64m8_t test_vreinterpret_v_i8m8_i64m8(vint8m8_t src) {
   return vreinterpret_v_i8m8_i64m8(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u8m1_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i8> [[SRC:%.*]] to <vscale x 1 x i64>
@@ -1031,7 +924,6 @@ vuint64m1_t test_vreinterpret_v_u8m1_u64m1(vuint8m1_t src) {
   return vreinterpret_v_u8m1_u64m1(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u8m2_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i8> [[SRC:%.*]] to <vscale x 2 x i64>
@@ -1041,7 +933,6 @@ vuint64m2_t test_vreinterpret_v_u8m2_u64m2(vuint8m2_t src) {
   return vreinterpret_v_u8m2_u64m2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u8m4_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 32 x i8> [[SRC:%.*]] to <vscale x 4 x i64>
@@ -1051,7 +942,6 @@ vuint64m4_t test_vreinterpret_v_u8m4_u64m4(vuint8m4_t src) {
   return vreinterpret_v_u8m4_u64m4(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u8m8_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 64 x i8> [[SRC:%.*]] to <vscale x 8 x i64>
@@ -1061,7 +951,6 @@ vuint64m8_t test_vreinterpret_v_u8m8_u64m8(vuint8m8_t src) {
   return vreinterpret_v_u8m8_u64m8(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i16mf4_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i16> [[SRC:%.*]] to <vscale x 2 x i8>
@@ -1071,7 +960,6 @@ vint8mf4_t test_vreinterpret_v_i16mf4_i8mf4(vint16mf4_t src) {
   return vreinterpret_v_i16mf4_i8mf4(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i16mf2_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i16> [[SRC:%.*]] to <vscale x 4 x i8>
@@ -1081,7 +969,6 @@ vint8mf2_t test_vreinterpret_v_i16mf2_i8mf2(vint16mf2_t src) {
   return vreinterpret_v_i16mf2_i8mf2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i16m1_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i16> [[SRC:%.*]] to <vscale x 8 x i8>
@@ -1091,7 +978,6 @@ vint8m1_t test_vreinterpret_v_i16m1_i8m1(vint16m1_t src) {
   return vreinterpret_v_i16m1_i8m1(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i16m2_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i16> [[SRC:%.*]] to <vscale x 16 x i8>
@@ -1101,7 +987,6 @@ vint8m2_t test_vreinterpret_v_i16m2_i8m2(vint16m2_t src) {
   return vreinterpret_v_i16m2_i8m2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i16m4_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i16> [[SRC:%.*]] to <vscale x 32 x i8>
@@ -1111,7 +996,6 @@ vint8m4_t test_vreinterpret_v_i16m4_i8m4(vint16m4_t src) {
   return vreinterpret_v_i16m4_i8m4(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i16m8_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 32 x i16> [[SRC:%.*]] to <vscale x 64 x i8>
@@ -1121,7 +1005,6 @@ vint8m8_t test_vreinterpret_v_i16m8_i8m8(vint16m8_t src) {
   return vreinterpret_v_i16m8_i8m8(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u16mf4_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i16> [[SRC:%.*]] to <vscale x 2 x i8>
@@ -1131,7 +1014,6 @@ vuint8mf4_t test_vreinterpret_v_u16mf4_u8mf4(vuint16mf4_t src) {
   return vreinterpret_v_u16mf4_u8mf4(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u16mf2_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i16> [[SRC:%.*]] to <vscale x 4 x i8>
@@ -1141,7 +1023,6 @@ vuint8mf2_t test_vreinterpret_v_u16mf2_u8mf2(vuint16mf2_t src) {
   return vreinterpret_v_u16mf2_u8mf2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u16m1_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i16> [[SRC:%.*]] to <vscale x 8 x i8>
@@ -1151,7 +1032,6 @@ vuint8m1_t test_vreinterpret_v_u16m1_u8m1(vuint16m1_t src) {
   return vreinterpret_v_u16m1_u8m1(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u16m2_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i16> [[SRC:%.*]] to <vscale x 16 x i8>
@@ -1161,7 +1041,6 @@ vuint8m2_t test_vreinterpret_v_u16m2_u8m2(vuint16m2_t src) {
   return vreinterpret_v_u16m2_u8m2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u16m4_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i16> [[SRC:%.*]] to <vscale x 32 x i8>
@@ -1171,7 +1050,6 @@ vuint8m4_t test_vreinterpret_v_u16m4_u8m4(vuint16m4_t src) {
   return vreinterpret_v_u16m4_u8m4(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u16m8_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 32 x i16> [[SRC:%.*]] to <vscale x 64 x i8>
@@ -1181,7 +1059,6 @@ vuint8m8_t test_vreinterpret_v_u16m8_u8m8(vuint16m8_t src) {
   return vreinterpret_v_u16m8_u8m8(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i16mf2_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i16> [[SRC:%.*]] to <vscale x 1 x i32>
@@ -1191,7 +1068,6 @@ vint32mf2_t test_vreinterpret_v_i16mf2_i32mf2(vint16mf2_t src) {
   return vreinterpret_v_i16mf2_i32mf2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i16m1_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i16> [[SRC:%.*]] to <vscale x 2 x i32>
@@ -1201,7 +1077,6 @@ vint32m1_t test_vreinterpret_v_i16m1_i32m1(vint16m1_t src) {
   return vreinterpret_v_i16m1_i32m1(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i16m2_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i16> [[SRC:%.*]] to <vscale x 4 x i32>
@@ -1211,7 +1086,6 @@ vint32m2_t test_vreinterpret_v_i16m2_i32m2(vint16m2_t src) {
   return vreinterpret_v_i16m2_i32m2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i16m4_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i16> [[SRC:%.*]] to <vscale x 8 x i32>
@@ -1221,7 +1095,6 @@ vint32m4_t test_vreinterpret_v_i16m4_i32m4(vint16m4_t src) {
   return vreinterpret_v_i16m4_i32m4(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i16m8_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 32 x i16> [[SRC:%.*]] to <vscale x 16 x i32>
@@ -1231,7 +1104,6 @@ vint32m8_t test_vreinterpret_v_i16m8_i32m8(vint16m8_t src) {
   return vreinterpret_v_i16m8_i32m8(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u16mf2_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i16> [[SRC:%.*]] to <vscale x 1 x i32>
@@ -1241,7 +1113,6 @@ vuint32mf2_t test_vreinterpret_v_u16mf2_u32mf2(vuint16mf2_t src) {
   return vreinterpret_v_u16mf2_u32mf2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u16m1_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i16> [[SRC:%.*]] to <vscale x 2 x i32>
@@ -1251,7 +1122,6 @@ vuint32m1_t test_vreinterpret_v_u16m1_u32m1(vuint16m1_t src) {
   return vreinterpret_v_u16m1_u32m1(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u16m2_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i16> [[SRC:%.*]] to <vscale x 4 x i32>
@@ -1261,7 +1131,6 @@ vuint32m2_t test_vreinterpret_v_u16m2_u32m2(vuint16m2_t src) {
   return vreinterpret_v_u16m2_u32m2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u16m4_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i16> [[SRC:%.*]] to <vscale x 8 x i32>
@@ -1271,7 +1140,6 @@ vuint32m4_t test_vreinterpret_v_u16m4_u32m4(vuint16m4_t src) {
   return vreinterpret_v_u16m4_u32m4(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u16m8_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 32 x i16> [[SRC:%.*]] to <vscale x 16 x i32>
@@ -1281,7 +1149,6 @@ vuint32m8_t test_vreinterpret_v_u16m8_u32m8(vuint16m8_t src) {
   return vreinterpret_v_u16m8_u32m8(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i16m1_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i16> [[SRC:%.*]] to <vscale x 1 x i64>
@@ -1291,7 +1158,6 @@ vint64m1_t test_vreinterpret_v_i16m1_i64m1(vint16m1_t src) {
   return vreinterpret_v_i16m1_i64m1(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i16m2_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i16> [[SRC:%.*]] to <vscale x 2 x i64>
@@ -1301,7 +1167,6 @@ vint64m2_t test_vreinterpret_v_i16m2_i64m2(vint16m2_t src) {
   return vreinterpret_v_i16m2_i64m2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i16m4_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i16> [[SRC:%.*]] to <vscale x 4 x i64>
@@ -1311,7 +1176,6 @@ vint64m4_t test_vreinterpret_v_i16m4_i64m4(vint16m4_t src) {
   return vreinterpret_v_i16m4_i64m4(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i16m8_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 32 x i16> [[SRC:%.*]] to <vscale x 8 x i64>
@@ -1321,7 +1185,6 @@ vint64m8_t test_vreinterpret_v_i16m8_i64m8(vint16m8_t src) {
   return vreinterpret_v_i16m8_i64m8(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u16m1_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i16> [[SRC:%.*]] to <vscale x 1 x i64>
@@ -1331,7 +1194,6 @@ vuint64m1_t test_vreinterpret_v_u16m1_u64m1(vuint16m1_t src) {
   return vreinterpret_v_u16m1_u64m1(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u16m2_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i16> [[SRC:%.*]] to <vscale x 2 x i64>
@@ -1341,7 +1203,6 @@ vuint64m2_t test_vreinterpret_v_u16m2_u64m2(vuint16m2_t src) {
   return vreinterpret_v_u16m2_u64m2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u16m4_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i16> [[SRC:%.*]] to <vscale x 4 x i64>
@@ -1351,7 +1212,6 @@ vuint64m4_t test_vreinterpret_v_u16m4_u64m4(vuint16m4_t src) {
   return vreinterpret_v_u16m4_u64m4(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u16m8_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 32 x i16> [[SRC:%.*]] to <vscale x 8 x i64>
@@ -1361,7 +1221,6 @@ vuint64m8_t test_vreinterpret_v_u16m8_u64m8(vuint16m8_t src) {
   return vreinterpret_v_u16m8_u64m8(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i32mf2_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i32> [[SRC:%.*]] to <vscale x 4 x i8>
@@ -1371,7 +1230,6 @@ vint8mf2_t test_vreinterpret_v_i32mf2_i8mf2(vint32mf2_t src) {
   return vreinterpret_v_i32mf2_i8mf2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i32m1_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i32> [[SRC:%.*]] to <vscale x 8 x i8>
@@ -1381,7 +1239,6 @@ vint8m1_t test_vreinterpret_v_i32m1_i8m1(vint32m1_t src) {
   return vreinterpret_v_i32m1_i8m1(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i32m2_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i32> [[SRC:%.*]] to <vscale x 16 x i8>
@@ -1391,7 +1248,6 @@ vint8m2_t test_vreinterpret_v_i32m2_i8m2(vint32m2_t src) {
   return vreinterpret_v_i32m2_i8m2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i32m4_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i32> [[SRC:%.*]] to <vscale x 32 x i8>
@@ -1401,7 +1257,6 @@ vint8m4_t test_vreinterpret_v_i32m4_i8m4(vint32m4_t src) {
   return vreinterpret_v_i32m4_i8m4(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i32m8_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i32> [[SRC:%.*]] to <vscale x 64 x i8>
@@ -1411,7 +1266,6 @@ vint8m8_t test_vreinterpret_v_i32m8_i8m8(vint32m8_t src) {
   return vreinterpret_v_i32m8_i8m8(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u32mf2_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i32> [[SRC:%.*]] to <vscale x 4 x i8>
@@ -1421,7 +1275,6 @@ vuint8mf2_t test_vreinterpret_v_u32mf2_u8mf2(vuint32mf2_t src) {
   return vreinterpret_v_u32mf2_u8mf2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u32m1_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i32> [[SRC:%.*]] to <vscale x 8 x i8>
@@ -1431,7 +1284,6 @@ vuint8m1_t test_vreinterpret_v_u32m1_u8m1(vuint32m1_t src) {
   return vreinterpret_v_u32m1_u8m1(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u32m2_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i32> [[SRC:%.*]] to <vscale x 16 x i8>
@@ -1441,7 +1293,6 @@ vuint8m2_t test_vreinterpret_v_u32m2_u8m2(vuint32m2_t src) {
   return vreinterpret_v_u32m2_u8m2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u32m4_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i32> [[SRC:%.*]] to <vscale x 32 x i8>
@@ -1451,7 +1302,6 @@ vuint8m4_t test_vreinterpret_v_u32m4_u8m4(vuint32m4_t src) {
   return vreinterpret_v_u32m4_u8m4(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u32m8_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i32> [[SRC:%.*]] to <vscale x 64 x i8>
@@ -1461,7 +1311,6 @@ vuint8m8_t test_vreinterpret_v_u32m8_u8m8(vuint32m8_t src) {
   return vreinterpret_v_u32m8_u8m8(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i32mf2_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i32> [[SRC:%.*]] to <vscale x 2 x i16>
@@ -1471,7 +1320,6 @@ vint16mf2_t test_vreinterpret_v_i32mf2_i16mf2(vint32mf2_t src) {
   return vreinterpret_v_i32mf2_i16mf2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i32m1_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i32> [[SRC:%.*]] to <vscale x 4 x i16>
@@ -1481,7 +1329,6 @@ vint16m1_t test_vreinterpret_v_i32m1_i16m1(vint32m1_t src) {
   return vreinterpret_v_i32m1_i16m1(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i32m2_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i32> [[SRC:%.*]] to <vscale x 8 x i16>
@@ -1491,7 +1338,6 @@ vint16m2_t test_vreinterpret_v_i32m2_i16m2(vint32m2_t src) {
   return vreinterpret_v_i32m2_i16m2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i32m4_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i32> [[SRC:%.*]] to <vscale x 16 x i16>
@@ -1501,7 +1347,6 @@ vint16m4_t test_vreinterpret_v_i32m4_i16m4(vint32m4_t src) {
   return vreinterpret_v_i32m4_i16m4(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i32m8_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i32> [[SRC:%.*]] to <vscale x 32 x i16>
@@ -1511,7 +1356,6 @@ vint16m8_t test_vreinterpret_v_i32m8_i16m8(vint32m8_t src) {
   return vreinterpret_v_i32m8_i16m8(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u32mf2_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i32> [[SRC:%.*]] to <vscale x 2 x i16>
@@ -1521,7 +1365,6 @@ vuint16mf2_t test_vreinterpret_v_u32mf2_u16mf2(vuint32mf2_t src) {
   return vreinterpret_v_u32mf2_u16mf2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u32m1_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i32> [[SRC:%.*]] to <vscale x 4 x i16>
@@ -1531,7 +1374,6 @@ vuint16m1_t test_vreinterpret_v_u32m1_u16m1(vuint32m1_t src) {
   return vreinterpret_v_u32m1_u16m1(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u32m2_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i32> [[SRC:%.*]] to <vscale x 8 x i16>
@@ -1541,7 +1383,6 @@ vuint16m2_t test_vreinterpret_v_u32m2_u16m2(vuint32m2_t src) {
   return vreinterpret_v_u32m2_u16m2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u32m4_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i32> [[SRC:%.*]] to <vscale x 16 x i16>
@@ -1551,7 +1392,6 @@ vuint16m4_t test_vreinterpret_v_u32m4_u16m4(vuint32m4_t src) {
   return vreinterpret_v_u32m4_u16m4(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u32m8_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i32> [[SRC:%.*]] to <vscale x 32 x i16>
@@ -1561,7 +1401,6 @@ vuint16m8_t test_vreinterpret_v_u32m8_u16m8(vuint32m8_t src) {
   return vreinterpret_v_u32m8_u16m8(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i32m1_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i32> [[SRC:%.*]] to <vscale x 1 x i64>
@@ -1571,7 +1410,6 @@ vint64m1_t test_vreinterpret_v_i32m1_i64m1(vint32m1_t src) {
   return vreinterpret_v_i32m1_i64m1(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i32m2_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i32> [[SRC:%.*]] to <vscale x 2 x i64>
@@ -1581,7 +1419,6 @@ vint64m2_t test_vreinterpret_v_i32m2_i64m2(vint32m2_t src) {
   return vreinterpret_v_i32m2_i64m2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i32m4_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i32> [[SRC:%.*]] to <vscale x 4 x i64>
@@ -1591,7 +1428,6 @@ vint64m4_t test_vreinterpret_v_i32m4_i64m4(vint32m4_t src) {
   return vreinterpret_v_i32m4_i64m4(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i32m8_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i32> [[SRC:%.*]] to <vscale x 8 x i64>
@@ -1601,7 +1437,6 @@ vint64m8_t test_vreinterpret_v_i32m8_i64m8(vint32m8_t src) {
   return vreinterpret_v_i32m8_i64m8(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u32m1_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i32> [[SRC:%.*]] to <vscale x 1 x i64>
@@ -1611,7 +1446,6 @@ vuint64m1_t test_vreinterpret_v_u32m1_u64m1(vuint32m1_t src) {
   return vreinterpret_v_u32m1_u64m1(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u32m2_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i32> [[SRC:%.*]] to <vscale x 2 x i64>
@@ -1621,7 +1455,6 @@ vuint64m2_t test_vreinterpret_v_u32m2_u64m2(vuint32m2_t src) {
   return vreinterpret_v_u32m2_u64m2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u32m4_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i32> [[SRC:%.*]] to <vscale x 4 x i64>
@@ -1631,7 +1464,6 @@ vuint64m4_t test_vreinterpret_v_u32m4_u64m4(vuint32m4_t src) {
   return vreinterpret_v_u32m4_u64m4(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u32m8_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i32> [[SRC:%.*]] to <vscale x 8 x i64>
@@ -1641,7 +1473,6 @@ vuint64m8_t test_vreinterpret_v_u32m8_u64m8(vuint32m8_t src) {
   return vreinterpret_v_u32m8_u64m8(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i64m1_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i64> [[SRC:%.*]] to <vscale x 8 x i8>
@@ -1651,7 +1482,6 @@ vint8m1_t test_vreinterpret_v_i64m1_i8m1(vint64m1_t src) {
   return vreinterpret_v_i64m1_i8m1(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i64m2_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i64> [[SRC:%.*]] to <vscale x 16 x i8>
@@ -1661,7 +1491,6 @@ vint8m2_t test_vreinterpret_v_i64m2_i8m2(vint64m2_t src) {
   return vreinterpret_v_i64m2_i8m2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i64m4_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i64> [[SRC:%.*]] to <vscale x 32 x i8>
@@ -1671,7 +1500,6 @@ vint8m4_t test_vreinterpret_v_i64m4_i8m4(vint64m4_t src) {
   return vreinterpret_v_i64m4_i8m4(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i64m8_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i64> [[SRC:%.*]] to <vscale x 64 x i8>
@@ -1681,7 +1509,6 @@ vint8m8_t test_vreinterpret_v_i64m8_i8m8(vint64m8_t src) {
   return vreinterpret_v_i64m8_i8m8(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u64m1_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i64> [[SRC:%.*]] to <vscale x 8 x i8>
@@ -1691,7 +1518,6 @@ vuint8m1_t test_vreinterpret_v_u64m1_u8m1(vuint64m1_t src) {
   return vreinterpret_v_u64m1_u8m1(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u64m2_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i64> [[SRC:%.*]] to <vscale x 16 x i8>
@@ -1701,7 +1527,6 @@ vuint8m2_t test_vreinterpret_v_u64m2_u8m2(vuint64m2_t src) {
   return vreinterpret_v_u64m2_u8m2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u64m4_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i64> [[SRC:%.*]] to <vscale x 32 x i8>
@@ -1711,7 +1536,6 @@ vuint8m4_t test_vreinterpret_v_u64m4_u8m4(vuint64m4_t src) {
   return vreinterpret_v_u64m4_u8m4(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u64m8_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i64> [[SRC:%.*]] to <vscale x 64 x i8>
@@ -1721,7 +1545,6 @@ vuint8m8_t test_vreinterpret_v_u64m8_u8m8(vuint64m8_t src) {
   return vreinterpret_v_u64m8_u8m8(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i64m1_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i64> [[SRC:%.*]] to <vscale x 4 x i16>
@@ -1731,7 +1554,6 @@ vint16m1_t test_vreinterpret_v_i64m1_i16m1(vint64m1_t src) {
   return vreinterpret_v_i64m1_i16m1(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i64m2_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i64> [[SRC:%.*]] to <vscale x 8 x i16>
@@ -1741,7 +1563,6 @@ vint16m2_t test_vreinterpret_v_i64m2_i16m2(vint64m2_t src) {
   return vreinterpret_v_i64m2_i16m2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i64m4_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i64> [[SRC:%.*]] to <vscale x 16 x i16>
@@ -1751,7 +1572,6 @@ vint16m4_t test_vreinterpret_v_i64m4_i16m4(vint64m4_t src) {
   return vreinterpret_v_i64m4_i16m4(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i64m8_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i64> [[SRC:%.*]] to <vscale x 32 x i16>
@@ -1761,7 +1581,6 @@ vint16m8_t test_vreinterpret_v_i64m8_i16m8(vint64m8_t src) {
   return vreinterpret_v_i64m8_i16m8(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u64m1_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i64> [[SRC:%.*]] to <vscale x 4 x i16>
@@ -1771,7 +1590,6 @@ vuint16m1_t test_vreinterpret_v_u64m1_u16m1(vuint64m1_t src) {
   return vreinterpret_v_u64m1_u16m1(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u64m2_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i64> [[SRC:%.*]] to <vscale x 8 x i16>
@@ -1781,7 +1599,6 @@ vuint16m2_t test_vreinterpret_v_u64m2_u16m2(vuint64m2_t src) {
   return vreinterpret_v_u64m2_u16m2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u64m4_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i64> [[SRC:%.*]] to <vscale x 16 x i16>
@@ -1791,7 +1608,6 @@ vuint16m4_t test_vreinterpret_v_u64m4_u16m4(vuint64m4_t src) {
   return vreinterpret_v_u64m4_u16m4(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u64m8_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i64> [[SRC:%.*]] to <vscale x 32 x i16>
@@ -1801,7 +1617,6 @@ vuint16m8_t test_vreinterpret_v_u64m8_u16m8(vuint64m8_t src) {
   return vreinterpret_v_u64m8_u16m8(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i64m1_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i64> [[SRC:%.*]] to <vscale x 2 x i32>
@@ -1811,7 +1626,6 @@ vint32m1_t test_vreinterpret_v_i64m1_i32m1(vint64m1_t src) {
   return vreinterpret_v_i64m1_i32m1(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i64m2_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i64> [[SRC:%.*]] to <vscale x 4 x i32>
@@ -1821,7 +1635,6 @@ vint32m2_t test_vreinterpret_v_i64m2_i32m2(vint64m2_t src) {
   return vreinterpret_v_i64m2_i32m2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i64m4_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i64> [[SRC:%.*]] to <vscale x 8 x i32>
@@ -1831,7 +1644,6 @@ vint32m4_t test_vreinterpret_v_i64m4_i32m4(vint64m4_t src) {
   return vreinterpret_v_i64m4_i32m4(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i64m8_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i64> [[SRC:%.*]] to <vscale x 16 x i32>
@@ -1841,7 +1653,6 @@ vint32m8_t test_vreinterpret_v_i64m8_i32m8(vint64m8_t src) {
   return vreinterpret_v_i64m8_i32m8(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u64m1_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i64> [[SRC:%.*]] to <vscale x 2 x i32>
@@ -1851,7 +1662,6 @@ vuint32m1_t test_vreinterpret_v_u64m1_u32m1(vuint64m1_t src) {
   return vreinterpret_v_u64m1_u32m1(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u64m2_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i64> [[SRC:%.*]] to <vscale x 4 x i32>
@@ -1861,7 +1671,6 @@ vuint32m2_t test_vreinterpret_v_u64m2_u32m2(vuint64m2_t src) {
   return vreinterpret_v_u64m2_u32m2(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u64m4_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i64> [[SRC:%.*]] to <vscale x 8 x i32>
@@ -1871,7 +1680,6 @@ vuint32m4_t test_vreinterpret_v_u64m4_u32m4(vuint64m4_t src) {
   return vreinterpret_v_u64m4_u32m4(src);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vreinterpret_v_u64m8_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i64> [[SRC:%.*]] to <vscale x 16 x i32>

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vrem.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrem.c
index e62fc26f57af2..97282852b2147 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vrem.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrem.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint8mf8_t test_vrem_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
   return vrem_vv_i8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint8mf8_t test_vrem_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
   return vrem_vx_i8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint8mf4_t test_vrem_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
   return vrem_vv_i8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint8mf4_t test_vrem_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
   return vrem_vx_i8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint8mf2_t test_vrem_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
   return vrem_vv_i8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint8mf2_t test_vrem_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
   return vrem_vx_i8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vint8m1_t test_vrem_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
   return vrem_vv_i8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vint8m1_t test_vrem_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
   return vrem_vx_i8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vint8m2_t test_vrem_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
   return vrem_vv_i8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vint8m2_t test_vrem_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
   return vrem_vx_i8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vint8m4_t test_vrem_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
   return vrem_vv_i8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vint8m4_t test_vrem_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
   return vrem_vx_i8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vint8m8_t test_vrem_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
   return vrem_vv_i8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vint8m8_t test_vrem_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
   return vrem_vx_i8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vint16mf4_t test_vrem_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
   return vrem_vv_i16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vint16mf4_t test_vrem_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
   return vrem_vx_i16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vint16mf2_t test_vrem_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
   return vrem_vv_i16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vint16mf2_t test_vrem_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
   return vrem_vx_i16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vint16m1_t test_vrem_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
   return vrem_vv_i16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vint16m1_t test_vrem_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
   return vrem_vx_i16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vint16m2_t test_vrem_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
   return vrem_vv_i16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vint16m2_t test_vrem_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
   return vrem_vx_i16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vint16m4_t test_vrem_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
   return vrem_vv_i16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vint16m4_t test_vrem_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
   return vrem_vx_i16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vint16m8_t test_vrem_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
   return vrem_vv_i16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vint16m8_t test_vrem_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
   return vrem_vx_i16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vint32mf2_t test_vrem_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
   return vrem_vv_i32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vint32mf2_t test_vrem_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
   return vrem_vx_i32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vint32m1_t test_vrem_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
   return vrem_vv_i32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -304,7 +274,6 @@ vint32m1_t test_vrem_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
   return vrem_vx_i32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -314,7 +283,6 @@ vint32m2_t test_vrem_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
   return vrem_vv_i32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -324,7 +292,6 @@ vint32m2_t test_vrem_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
   return vrem_vx_i32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -334,7 +301,6 @@ vint32m4_t test_vrem_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
   return vrem_vv_i32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -344,7 +310,6 @@ vint32m4_t test_vrem_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
   return vrem_vx_i32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -354,7 +319,6 @@ vint32m8_t test_vrem_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
   return vrem_vv_i32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -364,7 +328,6 @@ vint32m8_t test_vrem_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
   return vrem_vx_i32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -374,7 +337,6 @@ vint64m1_t test_vrem_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
   return vrem_vv_i64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -384,7 +346,6 @@ vint64m1_t test_vrem_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
   return vrem_vx_i64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -394,7 +355,6 @@ vint64m2_t test_vrem_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
   return vrem_vv_i64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -404,7 +364,6 @@ vint64m2_t test_vrem_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
   return vrem_vx_i64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -414,7 +373,6 @@ vint64m4_t test_vrem_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
   return vrem_vv_i64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -424,7 +382,6 @@ vint64m4_t test_vrem_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
   return vrem_vx_i64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -434,7 +391,6 @@ vint64m8_t test_vrem_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
   return vrem_vv_i64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -444,7 +400,6 @@ vint64m8_t test_vrem_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
   return vrem_vx_i64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -454,7 +409,6 @@ vuint8mf8_t test_vremu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
   return vremu_vv_u8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -464,7 +418,6 @@ vuint8mf8_t test_vremu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
   return vremu_vx_u8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -474,7 +427,6 @@ vuint8mf4_t test_vremu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
   return vremu_vv_u8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -484,7 +436,6 @@ vuint8mf4_t test_vremu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
   return vremu_vx_u8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -494,7 +445,6 @@ vuint8mf2_t test_vremu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
   return vremu_vv_u8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -504,7 +454,6 @@ vuint8mf2_t test_vremu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
   return vremu_vx_u8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -514,7 +463,6 @@ vuint8m1_t test_vremu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vremu_vv_u8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -524,7 +472,6 @@ vuint8m1_t test_vremu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
   return vremu_vx_u8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -534,7 +481,6 @@ vuint8m2_t test_vremu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vremu_vv_u8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -544,7 +490,6 @@ vuint8m2_t test_vremu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
   return vremu_vx_u8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -554,7 +499,6 @@ vuint8m4_t test_vremu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vremu_vv_u8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -564,7 +508,6 @@ vuint8m4_t test_vremu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
   return vremu_vx_u8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -574,7 +517,6 @@ vuint8m8_t test_vremu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
   return vremu_vv_u8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -584,7 +526,6 @@ vuint8m8_t test_vremu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
   return vremu_vx_u8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -594,7 +535,6 @@ vuint16mf4_t test_vremu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
   return vremu_vv_u16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -604,7 +544,6 @@ vuint16mf4_t test_vremu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
   return vremu_vx_u16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -614,7 +553,6 @@ vuint16mf2_t test_vremu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
   return vremu_vv_u16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -624,7 +562,6 @@ vuint16mf2_t test_vremu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
   return vremu_vx_u16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -634,7 +571,6 @@ vuint16m1_t test_vremu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
   return vremu_vv_u16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -644,7 +580,6 @@ vuint16m1_t test_vremu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
   return vremu_vx_u16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -654,7 +589,6 @@ vuint16m2_t test_vremu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vremu_vv_u16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -664,7 +598,6 @@ vuint16m2_t test_vremu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
   return vremu_vx_u16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -674,7 +607,6 @@ vuint16m4_t test_vremu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vremu_vv_u16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -684,7 +616,6 @@ vuint16m4_t test_vremu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
   return vremu_vx_u16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -694,7 +625,6 @@ vuint16m8_t test_vremu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return vremu_vv_u16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -704,7 +634,6 @@ vuint16m8_t test_vremu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
   return vremu_vx_u16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -714,7 +643,6 @@ vuint32mf2_t test_vremu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
   return vremu_vv_u32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -724,7 +652,6 @@ vuint32mf2_t test_vremu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
   return vremu_vx_u32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -734,7 +661,6 @@ vuint32m1_t test_vremu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
   return vremu_vv_u32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -744,7 +670,6 @@ vuint32m1_t test_vremu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
   return vremu_vx_u32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -754,7 +679,6 @@ vuint32m2_t test_vremu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
   return vremu_vv_u32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -764,7 +688,6 @@ vuint32m2_t test_vremu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vremu_vx_u32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -774,7 +697,6 @@ vuint32m4_t test_vremu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vremu_vv_u32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -784,7 +706,6 @@ vuint32m4_t test_vremu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vremu_vx_u32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -794,7 +715,6 @@ vuint32m8_t test_vremu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
   return vremu_vv_u32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -804,7 +724,6 @@ vuint32m8_t test_vremu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
   return vremu_vx_u32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -814,7 +733,6 @@ vuint64m1_t test_vremu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
   return vremu_vv_u64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -824,7 +742,6 @@ vuint64m1_t test_vremu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
   return vremu_vx_u64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -834,7 +751,6 @@ vuint64m2_t test_vremu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
   return vremu_vv_u64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -844,7 +760,6 @@ vuint64m2_t test_vremu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
   return vremu_vx_u64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -854,7 +769,6 @@ vuint64m4_t test_vremu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
   return vremu_vv_u64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -864,7 +778,6 @@ vuint64m4_t test_vremu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
   return vremu_vx_u64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -874,7 +787,6 @@ vuint64m8_t test_vremu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
   return vremu_vv_u64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -884,7 +796,6 @@ vuint64m8_t test_vremu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
   return vremu_vx_u64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrem.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -894,7 +805,6 @@ vint8mf8_t test_vrem_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t
   return vrem_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrem.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -904,7 +814,6 @@ vint8mf8_t test_vrem_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t
   return vrem_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrem.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -914,7 +823,6 @@ vint8mf4_t test_vrem_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t
   return vrem_vv_i8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrem.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -924,7 +832,6 @@ vint8mf4_t test_vrem_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t
   return vrem_vx_i8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrem.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -934,7 +841,6 @@ vint8mf2_t test_vrem_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t
   return vrem_vv_i8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrem.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -944,7 +850,6 @@ vint8mf2_t test_vrem_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t
   return vrem_vx_i8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrem.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -954,7 +859,6 @@ vint8m1_t test_vrem_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1,
   return vrem_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrem.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -964,7 +868,6 @@ vint8m1_t test_vrem_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1,
   return vrem_vx_i8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrem.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -974,7 +877,6 @@ vint8m2_t test_vrem_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1,
   return vrem_vv_i8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrem.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -984,7 +886,6 @@ vint8m2_t test_vrem_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1,
   return vrem_vx_i8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrem.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -994,7 +895,6 @@ vint8m4_t test_vrem_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1,
   return vrem_vv_i8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrem.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1004,7 +904,6 @@ vint8m4_t test_vrem_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1,
   return vrem_vx_i8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrem.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1014,7 +913,6 @@ vint8m8_t test_vrem_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1,
   return vrem_vv_i8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrem.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1024,7 +922,6 @@ vint8m8_t test_vrem_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1,
   return vrem_vx_i8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrem.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1034,7 +931,6 @@ vint16mf4_t test_vrem_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16m
   return vrem_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrem.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1044,7 +940,6 @@ vint16mf4_t test_vrem_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16m
   return vrem_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrem.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1054,7 +949,6 @@ vint16mf2_t test_vrem_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16m
   return vrem_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrem.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1064,7 +958,6 @@ vint16mf2_t test_vrem_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16m
   return vrem_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrem.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1074,7 +967,6 @@ vint16m1_t test_vrem_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t
   return vrem_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrem.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1084,7 +976,6 @@ vint16m1_t test_vrem_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t
   return vrem_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrem.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1094,7 +985,6 @@ vint16m2_t test_vrem_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
   return vrem_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrem.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1104,7 +994,6 @@ vint16m2_t test_vrem_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
   return vrem_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrem.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1114,7 +1003,6 @@ vint16m4_t test_vrem_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
   return vrem_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrem.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1124,7 +1012,6 @@ vint16m4_t test_vrem_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
   return vrem_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrem.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1134,7 +1021,6 @@ vint16m8_t test_vrem_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
   return vrem_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrem.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1144,7 +1030,6 @@ vint16m8_t test_vrem_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
   return vrem_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1154,7 +1039,6 @@ vint32mf2_t test_vrem_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32m
   return vrem_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1164,7 +1048,6 @@ vint32mf2_t test_vrem_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32m
   return vrem_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrem.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1174,7 +1057,6 @@ vint32m1_t test_vrem_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t
   return vrem_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrem.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1184,7 +1066,6 @@ vint32m1_t test_vrem_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t
   return vrem_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrem.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1194,7 +1075,6 @@ vint32m2_t test_vrem_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t
   return vrem_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrem.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1204,7 +1084,6 @@ vint32m2_t test_vrem_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t
   return vrem_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrem.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1214,7 +1093,6 @@ vint32m4_t test_vrem_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
   return vrem_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrem.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1224,7 +1102,6 @@ vint32m4_t test_vrem_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
   return vrem_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrem.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1234,7 +1111,6 @@ vint32m8_t test_vrem_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
   return vrem_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrem.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1244,7 +1120,6 @@ vint32m8_t test_vrem_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
   return vrem_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrem.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1254,7 +1129,6 @@ vint64m1_t test_vrem_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t
   return vrem_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrem.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1264,7 +1138,6 @@ vint64m1_t test_vrem_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t
   return vrem_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrem.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1274,7 +1147,6 @@ vint64m2_t test_vrem_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t
   return vrem_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrem.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1284,7 +1156,6 @@ vint64m2_t test_vrem_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t
   return vrem_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrem.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1294,7 +1165,6 @@ vint64m4_t test_vrem_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t
   return vrem_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrem.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1304,7 +1174,6 @@ vint64m4_t test_vrem_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t
   return vrem_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrem.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1314,7 +1183,6 @@ vint64m8_t test_vrem_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
   return vrem_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrem.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1324,7 +1192,6 @@ vint64m8_t test_vrem_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
   return vrem_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vremu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1334,7 +1201,6 @@ vuint8mf8_t test_vremu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8m
   return vremu_vv_u8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vremu.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1344,7 +1210,6 @@ vuint8mf8_t test_vremu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8m
   return vremu_vx_u8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vremu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1354,7 +1219,6 @@ vuint8mf4_t test_vremu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8m
   return vremu_vv_u8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vremu.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1364,7 +1228,6 @@ vuint8mf4_t test_vremu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8m
   return vremu_vx_u8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vremu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1374,7 +1237,6 @@ vuint8mf2_t test_vremu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8m
   return vremu_vv_u8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vremu.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1384,7 +1246,6 @@ vuint8mf2_t test_vremu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8m
   return vremu_vx_u8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vremu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1394,7 +1255,6 @@ vuint8m1_t test_vremu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
   return vremu_vv_u8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vremu.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1404,7 +1264,6 @@ vuint8m1_t test_vremu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
   return vremu_vx_u8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vremu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1414,7 +1273,6 @@ vuint8m2_t test_vremu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
   return vremu_vv_u8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vremu.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1424,7 +1282,6 @@ vuint8m2_t test_vremu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
   return vremu_vx_u8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vremu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1434,7 +1291,6 @@ vuint8m4_t test_vremu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
   return vremu_vv_u8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vremu.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1444,7 +1300,6 @@ vuint8m4_t test_vremu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
   return vremu_vx_u8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vremu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1454,7 +1309,6 @@ vuint8m8_t test_vremu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
   return vremu_vv_u8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vremu.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1464,7 +1318,6 @@ vuint8m8_t test_vremu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
   return vremu_vx_u8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vremu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1474,7 +1327,6 @@ vuint16mf4_t test_vremu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuin
   return vremu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vremu.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1484,7 +1336,6 @@ vuint16mf4_t test_vremu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuin
   return vremu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vremu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1494,7 +1345,6 @@ vuint16mf2_t test_vremu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuin
   return vremu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vremu.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1504,7 +1354,6 @@ vuint16mf2_t test_vremu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuin
   return vremu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vremu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1514,7 +1363,6 @@ vuint16m1_t test_vremu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16
   return vremu_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vremu.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1524,7 +1372,6 @@ vuint16m1_t test_vremu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16
   return vremu_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vremu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1534,7 +1381,6 @@ vuint16m2_t test_vremu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m
   return vremu_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vremu.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1544,7 +1390,6 @@ vuint16m2_t test_vremu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m
   return vremu_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vremu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1554,7 +1399,6 @@ vuint16m4_t test_vremu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m
   return vremu_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vremu.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1564,7 +1408,6 @@ vuint16m4_t test_vremu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m
   return vremu_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vremu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1574,7 +1417,6 @@ vuint16m8_t test_vremu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m
   return vremu_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vremu.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1584,7 +1426,6 @@ vuint16m8_t test_vremu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m
   return vremu_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1594,7 +1435,6 @@ vuint32mf2_t test_vremu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuin
   return vremu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1604,7 +1444,6 @@ vuint32mf2_t test_vremu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuin
   return vremu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vremu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1614,7 +1453,6 @@ vuint32m1_t test_vremu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32
   return vremu_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vremu.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1624,7 +1462,6 @@ vuint32m1_t test_vremu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32
   return vremu_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vremu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1634,7 +1471,6 @@ vuint32m2_t test_vremu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32
   return vremu_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vremu.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1644,7 +1480,6 @@ vuint32m2_t test_vremu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32
   return vremu_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vremu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1654,7 +1489,6 @@ vuint32m4_t test_vremu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m
   return vremu_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vremu.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1664,7 +1498,6 @@ vuint32m4_t test_vremu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m
   return vremu_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vremu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1674,7 +1507,6 @@ vuint32m8_t test_vremu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m
   return vremu_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vremu.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1684,7 +1516,6 @@ vuint32m8_t test_vremu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m
   return vremu_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vremu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1694,7 +1525,6 @@ vuint64m1_t test_vremu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64
   return vremu_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vremu.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1704,7 +1534,6 @@ vuint64m1_t test_vremu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64
   return vremu_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vremu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1714,7 +1543,6 @@ vuint64m2_t test_vremu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64
   return vremu_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vremu.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1724,7 +1552,6 @@ vuint64m2_t test_vremu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64
   return vremu_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vremu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1734,7 +1561,6 @@ vuint64m4_t test_vremu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64
   return vremu_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vremu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1744,7 +1570,6 @@ vuint64m4_t test_vremu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64
   return vremu_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vremu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1754,7 +1579,6 @@ vuint64m8_t test_vremu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m
   return vremu_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vremu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vrgather.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrgather.c
index c6b3c102a0833..b42c54138cf3d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vrgather.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrgather.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vint8mf8_t test_vrgather_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t index,
   return vrgather_vv_i8mf8(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -26,7 +24,6 @@ vint8mf8_t test_vrgather_vx_i8mf8(vint8mf8_t op1, size_t index, size_t vl) {
   return vrgather_vx_i8mf8(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vv.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -37,7 +34,6 @@ vint8mf4_t test_vrgather_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t index,
   return vrgather_vv_i8mf4(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -47,7 +43,6 @@ vint8mf4_t test_vrgather_vx_i8mf4(vint8mf4_t op1, size_t index, size_t vl) {
   return vrgather_vx_i8mf4(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vv.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -58,7 +53,6 @@ vint8mf2_t test_vrgather_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t index,
   return vrgather_vv_i8mf2(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -68,7 +62,6 @@ vint8mf2_t test_vrgather_vx_i8mf2(vint8mf2_t op1, size_t index, size_t vl) {
   return vrgather_vx_i8mf2(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vv.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -78,7 +71,6 @@ vint8m1_t test_vrgather_vv_i8m1(vint8m1_t op1, vuint8m1_t index, size_t vl) {
   return vrgather_vv_i8m1(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -88,7 +80,6 @@ vint8m1_t test_vrgather_vx_i8m1(vint8m1_t op1, size_t index, size_t vl) {
   return vrgather_vx_i8m1(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vv.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -98,7 +89,6 @@ vint8m2_t test_vrgather_vv_i8m2(vint8m2_t op1, vuint8m2_t index, size_t vl) {
   return vrgather_vv_i8m2(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -108,7 +98,6 @@ vint8m2_t test_vrgather_vx_i8m2(vint8m2_t op1, size_t index, size_t vl) {
   return vrgather_vx_i8m2(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vv.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -118,7 +107,6 @@ vint8m4_t test_vrgather_vv_i8m4(vint8m4_t op1, vuint8m4_t index, size_t vl) {
   return vrgather_vv_i8m4(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -128,7 +116,6 @@ vint8m4_t test_vrgather_vx_i8m4(vint8m4_t op1, size_t index, size_t vl) {
   return vrgather_vx_i8m4(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vv.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -138,7 +125,6 @@ vint8m8_t test_vrgather_vv_i8m8(vint8m8_t op1, vuint8m8_t index, size_t vl) {
   return vrgather_vv_i8m8(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -148,7 +134,6 @@ vint8m8_t test_vrgather_vx_i8m8(vint8m8_t op1, size_t index, size_t vl) {
   return vrgather_vx_i8m8(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vv.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -159,7 +144,6 @@ vint16mf4_t test_vrgather_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t index,
   return vrgather_vv_i16mf4(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -169,7 +153,6 @@ vint16mf4_t test_vrgather_vx_i16mf4(vint16mf4_t op1, size_t index, size_t vl) {
   return vrgather_vx_i16mf4(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vv.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -180,7 +163,6 @@ vint16mf2_t test_vrgather_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t index,
   return vrgather_vv_i16mf2(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -190,7 +172,6 @@ vint16mf2_t test_vrgather_vx_i16mf2(vint16mf2_t op1, size_t index, size_t vl) {
   return vrgather_vx_i16mf2(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vv.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -201,7 +182,6 @@ vint16m1_t test_vrgather_vv_i16m1(vint16m1_t op1, vuint16m1_t index,
   return vrgather_vv_i16m1(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -211,7 +191,6 @@ vint16m1_t test_vrgather_vx_i16m1(vint16m1_t op1, size_t index, size_t vl) {
   return vrgather_vx_i16m1(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vv.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -222,7 +201,6 @@ vint16m2_t test_vrgather_vv_i16m2(vint16m2_t op1, vuint16m2_t index,
   return vrgather_vv_i16m2(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -232,7 +210,6 @@ vint16m2_t test_vrgather_vx_i16m2(vint16m2_t op1, size_t index, size_t vl) {
   return vrgather_vx_i16m2(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vv.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -243,7 +220,6 @@ vint16m4_t test_vrgather_vv_i16m4(vint16m4_t op1, vuint16m4_t index,
   return vrgather_vv_i16m4(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -253,7 +229,6 @@ vint16m4_t test_vrgather_vx_i16m4(vint16m4_t op1, size_t index, size_t vl) {
   return vrgather_vx_i16m4(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vv.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -264,7 +239,6 @@ vint16m8_t test_vrgather_vv_i16m8(vint16m8_t op1, vuint16m8_t index,
   return vrgather_vv_i16m8(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -274,7 +248,6 @@ vint16m8_t test_vrgather_vx_i16m8(vint16m8_t op1, size_t index, size_t vl) {
   return vrgather_vx_i16m8(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -285,7 +258,6 @@ vint32mf2_t test_vrgather_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t index,
   return vrgather_vv_i32mf2(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -295,7 +267,6 @@ vint32mf2_t test_vrgather_vx_i32mf2(vint32mf2_t op1, size_t index, size_t vl) {
   return vrgather_vx_i32mf2(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vv.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -306,7 +277,6 @@ vint32m1_t test_vrgather_vv_i32m1(vint32m1_t op1, vuint32m1_t index,
   return vrgather_vv_i32m1(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -316,7 +286,6 @@ vint32m1_t test_vrgather_vx_i32m1(vint32m1_t op1, size_t index, size_t vl) {
   return vrgather_vx_i32m1(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vv.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -327,7 +296,6 @@ vint32m2_t test_vrgather_vv_i32m2(vint32m2_t op1, vuint32m2_t index,
   return vrgather_vv_i32m2(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -337,7 +305,6 @@ vint32m2_t test_vrgather_vx_i32m2(vint32m2_t op1, size_t index, size_t vl) {
   return vrgather_vx_i32m2(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vv.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -348,7 +315,6 @@ vint32m4_t test_vrgather_vv_i32m4(vint32m4_t op1, vuint32m4_t index,
   return vrgather_vv_i32m4(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -358,7 +324,6 @@ vint32m4_t test_vrgather_vx_i32m4(vint32m4_t op1, size_t index, size_t vl) {
   return vrgather_vx_i32m4(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vv.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -369,7 +334,6 @@ vint32m8_t test_vrgather_vv_i32m8(vint32m8_t op1, vuint32m8_t index,
   return vrgather_vv_i32m8(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -379,7 +343,6 @@ vint32m8_t test_vrgather_vx_i32m8(vint32m8_t op1, size_t index, size_t vl) {
   return vrgather_vx_i32m8(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vv.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -390,7 +353,6 @@ vint64m1_t test_vrgather_vv_i64m1(vint64m1_t op1, vuint64m1_t index,
   return vrgather_vv_i64m1(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -400,7 +362,6 @@ vint64m1_t test_vrgather_vx_i64m1(vint64m1_t op1, size_t index, size_t vl) {
   return vrgather_vx_i64m1(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vv.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -411,7 +372,6 @@ vint64m2_t test_vrgather_vv_i64m2(vint64m2_t op1, vuint64m2_t index,
   return vrgather_vv_i64m2(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -421,7 +381,6 @@ vint64m2_t test_vrgather_vx_i64m2(vint64m2_t op1, size_t index, size_t vl) {
   return vrgather_vx_i64m2(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vv.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -432,7 +391,6 @@ vint64m4_t test_vrgather_vv_i64m4(vint64m4_t op1, vuint64m4_t index,
   return vrgather_vv_i64m4(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -442,7 +400,6 @@ vint64m4_t test_vrgather_vx_i64m4(vint64m4_t op1, size_t index, size_t vl) {
   return vrgather_vx_i64m4(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vv.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -453,7 +410,6 @@ vint64m8_t test_vrgather_vv_i64m8(vint64m8_t op1, vuint64m8_t index,
   return vrgather_vv_i64m8(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -463,7 +419,6 @@ vint64m8_t test_vrgather_vx_i64m8(vint64m8_t op1, size_t index, size_t vl) {
   return vrgather_vx_i64m8(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -474,7 +429,6 @@ vuint8mf8_t test_vrgather_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t index,
   return vrgather_vv_u8mf8(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -484,7 +438,6 @@ vuint8mf8_t test_vrgather_vx_u8mf8(vuint8mf8_t op1, size_t index, size_t vl) {
   return vrgather_vx_u8mf8(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vv.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -495,7 +448,6 @@ vuint8mf4_t test_vrgather_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t index,
   return vrgather_vv_u8mf4(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -505,7 +457,6 @@ vuint8mf4_t test_vrgather_vx_u8mf4(vuint8mf4_t op1, size_t index, size_t vl) {
   return vrgather_vx_u8mf4(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vv.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -516,7 +467,6 @@ vuint8mf2_t test_vrgather_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t index,
   return vrgather_vv_u8mf2(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -526,7 +476,6 @@ vuint8mf2_t test_vrgather_vx_u8mf2(vuint8mf2_t op1, size_t index, size_t vl) {
   return vrgather_vx_u8mf2(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vv.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -536,7 +485,6 @@ vuint8m1_t test_vrgather_vv_u8m1(vuint8m1_t op1, vuint8m1_t index, size_t vl) {
   return vrgather_vv_u8m1(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -546,7 +494,6 @@ vuint8m1_t test_vrgather_vx_u8m1(vuint8m1_t op1, size_t index, size_t vl) {
   return vrgather_vx_u8m1(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vv.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -556,7 +503,6 @@ vuint8m2_t test_vrgather_vv_u8m2(vuint8m2_t op1, vuint8m2_t index, size_t vl) {
   return vrgather_vv_u8m2(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -566,7 +512,6 @@ vuint8m2_t test_vrgather_vx_u8m2(vuint8m2_t op1, size_t index, size_t vl) {
   return vrgather_vx_u8m2(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vv.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -576,7 +521,6 @@ vuint8m4_t test_vrgather_vv_u8m4(vuint8m4_t op1, vuint8m4_t index, size_t vl) {
   return vrgather_vv_u8m4(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -586,7 +530,6 @@ vuint8m4_t test_vrgather_vx_u8m4(vuint8m4_t op1, size_t index, size_t vl) {
   return vrgather_vx_u8m4(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vv.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -596,7 +539,6 @@ vuint8m8_t test_vrgather_vv_u8m8(vuint8m8_t op1, vuint8m8_t index, size_t vl) {
   return vrgather_vv_u8m8(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -606,7 +548,6 @@ vuint8m8_t test_vrgather_vx_u8m8(vuint8m8_t op1, size_t index, size_t vl) {
   return vrgather_vx_u8m8(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vv.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -617,7 +558,6 @@ vuint16mf4_t test_vrgather_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t index,
   return vrgather_vv_u16mf4(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -628,7 +568,6 @@ vuint16mf4_t test_vrgather_vx_u16mf4(vuint16mf4_t op1, size_t index,
   return vrgather_vx_u16mf4(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vv.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -639,7 +578,6 @@ vuint16mf2_t test_vrgather_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t index,
   return vrgather_vv_u16mf2(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -650,7 +588,6 @@ vuint16mf2_t test_vrgather_vx_u16mf2(vuint16mf2_t op1, size_t index,
   return vrgather_vx_u16mf2(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vv.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -661,7 +598,6 @@ vuint16m1_t test_vrgather_vv_u16m1(vuint16m1_t op1, vuint16m1_t index,
   return vrgather_vv_u16m1(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -671,7 +607,6 @@ vuint16m1_t test_vrgather_vx_u16m1(vuint16m1_t op1, size_t index, size_t vl) {
   return vrgather_vx_u16m1(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vv.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -682,7 +617,6 @@ vuint16m2_t test_vrgather_vv_u16m2(vuint16m2_t op1, vuint16m2_t index,
   return vrgather_vv_u16m2(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -692,7 +626,6 @@ vuint16m2_t test_vrgather_vx_u16m2(vuint16m2_t op1, size_t index, size_t vl) {
   return vrgather_vx_u16m2(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vv.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -703,7 +636,6 @@ vuint16m4_t test_vrgather_vv_u16m4(vuint16m4_t op1, vuint16m4_t index,
   return vrgather_vv_u16m4(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -713,7 +645,6 @@ vuint16m4_t test_vrgather_vx_u16m4(vuint16m4_t op1, size_t index, size_t vl) {
   return vrgather_vx_u16m4(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vv.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -724,7 +655,6 @@ vuint16m8_t test_vrgather_vv_u16m8(vuint16m8_t op1, vuint16m8_t index,
   return vrgather_vv_u16m8(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -734,7 +664,6 @@ vuint16m8_t test_vrgather_vx_u16m8(vuint16m8_t op1, size_t index, size_t vl) {
   return vrgather_vx_u16m8(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -745,7 +674,6 @@ vuint32mf2_t test_vrgather_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t index,
   return vrgather_vv_u32mf2(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -756,7 +684,6 @@ vuint32mf2_t test_vrgather_vx_u32mf2(vuint32mf2_t op1, size_t index,
   return vrgather_vx_u32mf2(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vv.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -767,7 +694,6 @@ vuint32m1_t test_vrgather_vv_u32m1(vuint32m1_t op1, vuint32m1_t index,
   return vrgather_vv_u32m1(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -777,7 +703,6 @@ vuint32m1_t test_vrgather_vx_u32m1(vuint32m1_t op1, size_t index, size_t vl) {
   return vrgather_vx_u32m1(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vv.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -788,7 +713,6 @@ vuint32m2_t test_vrgather_vv_u32m2(vuint32m2_t op1, vuint32m2_t index,
   return vrgather_vv_u32m2(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -798,7 +722,6 @@ vuint32m2_t test_vrgather_vx_u32m2(vuint32m2_t op1, size_t index, size_t vl) {
   return vrgather_vx_u32m2(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vv.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -809,7 +732,6 @@ vuint32m4_t test_vrgather_vv_u32m4(vuint32m4_t op1, vuint32m4_t index,
   return vrgather_vv_u32m4(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -819,7 +741,6 @@ vuint32m4_t test_vrgather_vx_u32m4(vuint32m4_t op1, size_t index, size_t vl) {
   return vrgather_vx_u32m4(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vv.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -830,7 +751,6 @@ vuint32m8_t test_vrgather_vv_u32m8(vuint32m8_t op1, vuint32m8_t index,
   return vrgather_vv_u32m8(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -840,7 +760,6 @@ vuint32m8_t test_vrgather_vx_u32m8(vuint32m8_t op1, size_t index, size_t vl) {
   return vrgather_vx_u32m8(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vv.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -851,7 +770,6 @@ vuint64m1_t test_vrgather_vv_u64m1(vuint64m1_t op1, vuint64m1_t index,
   return vrgather_vv_u64m1(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -861,7 +779,6 @@ vuint64m1_t test_vrgather_vx_u64m1(vuint64m1_t op1, size_t index, size_t vl) {
   return vrgather_vx_u64m1(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vv.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -872,7 +789,6 @@ vuint64m2_t test_vrgather_vv_u64m2(vuint64m2_t op1, vuint64m2_t index,
   return vrgather_vv_u64m2(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -882,7 +798,6 @@ vuint64m2_t test_vrgather_vx_u64m2(vuint64m2_t op1, size_t index, size_t vl) {
   return vrgather_vx_u64m2(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vv.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -893,7 +808,6 @@ vuint64m4_t test_vrgather_vv_u64m4(vuint64m4_t op1, vuint64m4_t index,
   return vrgather_vv_u64m4(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -903,7 +817,6 @@ vuint64m4_t test_vrgather_vx_u64m4(vuint64m4_t op1, size_t index, size_t vl) {
   return vrgather_vx_u64m4(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vv.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -914,7 +827,6 @@ vuint64m8_t test_vrgather_vv_u64m8(vuint64m8_t op1, vuint64m8_t index,
   return vrgather_vv_u64m8(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -924,7 +836,6 @@ vuint64m8_t test_vrgather_vx_u64m8(vuint64m8_t op1, size_t index, size_t vl) {
   return vrgather_vx_u64m8(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgather.vv.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -935,7 +846,6 @@ vfloat32mf2_t test_vrgather_vv_f32mf2(vfloat32mf2_t op1, vuint32mf2_t index,
   return vrgather_vv_f32mf2(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -946,7 +856,6 @@ vfloat32mf2_t test_vrgather_vx_f32mf2(vfloat32mf2_t op1, size_t index,
   return vrgather_vx_f32mf2(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vrgather.vv.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -957,7 +866,6 @@ vfloat32m1_t test_vrgather_vv_f32m1(vfloat32m1_t op1, vuint32m1_t index,
   return vrgather_vv_f32m1(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -967,7 +875,6 @@ vfloat32m1_t test_vrgather_vx_f32m1(vfloat32m1_t op1, size_t index, size_t vl) {
   return vrgather_vx_f32m1(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vrgather.vv.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -978,7 +885,6 @@ vfloat32m2_t test_vrgather_vv_f32m2(vfloat32m2_t op1, vuint32m2_t index,
   return vrgather_vv_f32m2(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -988,7 +894,6 @@ vfloat32m2_t test_vrgather_vx_f32m2(vfloat32m2_t op1, size_t index, size_t vl) {
   return vrgather_vx_f32m2(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vrgather.vv.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -999,7 +904,6 @@ vfloat32m4_t test_vrgather_vv_f32m4(vfloat32m4_t op1, vuint32m4_t index,
   return vrgather_vv_f32m4(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -1009,7 +913,6 @@ vfloat32m4_t test_vrgather_vx_f32m4(vfloat32m4_t op1, size_t index, size_t vl) {
   return vrgather_vx_f32m4(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vrgather.vv.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -1020,7 +923,6 @@ vfloat32m8_t test_vrgather_vv_f32m8(vfloat32m8_t op1, vuint32m8_t index,
   return vrgather_vv_f32m8(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -1030,7 +932,6 @@ vfloat32m8_t test_vrgather_vx_f32m8(vfloat32m8_t op1, size_t index, size_t vl) {
   return vrgather_vx_f32m8(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vrgather.vv.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -1041,7 +942,6 @@ vfloat64m1_t test_vrgather_vv_f64m1(vfloat64m1_t op1, vuint64m1_t index,
   return vrgather_vv_f64m1(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -1051,7 +951,6 @@ vfloat64m1_t test_vrgather_vx_f64m1(vfloat64m1_t op1, size_t index, size_t vl) {
   return vrgather_vx_f64m1(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vrgather.vv.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -1062,7 +961,6 @@ vfloat64m2_t test_vrgather_vv_f64m2(vfloat64m2_t op1, vuint64m2_t index,
   return vrgather_vv_f64m2(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -1072,7 +970,6 @@ vfloat64m2_t test_vrgather_vx_f64m2(vfloat64m2_t op1, size_t index, size_t vl) {
   return vrgather_vx_f64m2(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vrgather.vv.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -1083,7 +980,6 @@ vfloat64m4_t test_vrgather_vv_f64m4(vfloat64m4_t op1, vuint64m4_t index,
   return vrgather_vv_f64m4(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -1093,7 +989,6 @@ vfloat64m4_t test_vrgather_vx_f64m4(vfloat64m4_t op1, size_t index, size_t vl) {
   return vrgather_vx_f64m4(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vrgather.vv.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -1104,7 +999,6 @@ vfloat64m8_t test_vrgather_vv_f64m8(vfloat64m8_t op1, vuint64m8_t index,
   return vrgather_vv_f64m8(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -1114,7 +1008,6 @@ vfloat64m8_t test_vrgather_vx_f64m8(vfloat64m8_t op1, size_t index, size_t vl) {
   return vrgather_vx_f64m8(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1125,7 +1018,6 @@ vint8mf8_t test_vrgatherei16_vv_i8mf8(vint8mf8_t op1, vuint16mf4_t op2,
   return vrgatherei16_vv_i8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgatherei16.vv.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1136,7 +1028,6 @@ vint8mf4_t test_vrgatherei16_vv_i8mf4(vint8mf4_t op1, vuint16mf2_t op2,
   return vrgatherei16_vv_i8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgatherei16.vv.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1147,7 +1038,6 @@ vint8mf2_t test_vrgatherei16_vv_i8mf2(vint8mf2_t op1, vuint16m1_t op2,
   return vrgatherei16_vv_i8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1157,7 +1047,6 @@ vint8m1_t test_vrgatherei16_vv_i8m1(vint8m1_t op1, vuint16m2_t op2, size_t vl) {
   return vrgatherei16_vv_i8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgatherei16.vv.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1167,7 +1056,6 @@ vint8m2_t test_vrgatherei16_vv_i8m2(vint8m2_t op1, vuint16m4_t op2, size_t vl) {
   return vrgatherei16_vv_i8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgatherei16.vv.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1177,7 +1065,6 @@ vint8m4_t test_vrgatherei16_vv_i8m4(vint8m4_t op1, vuint16m8_t op2, size_t vl) {
   return vrgatherei16_vv_i8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgatherei16.vv.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1188,7 +1075,6 @@ vint16mf4_t test_vrgatherei16_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2,
   return vrgatherei16_vv_i16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgatherei16.vv.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1199,7 +1085,6 @@ vint16mf2_t test_vrgatherei16_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2,
   return vrgatherei16_vv_i16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgatherei16.vv.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1210,7 +1095,6 @@ vint16m1_t test_vrgatherei16_vv_i16m1(vint16m1_t op1, vuint16m1_t op2,
   return vrgatherei16_vv_i16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgatherei16.vv.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1221,7 +1105,6 @@ vint16m2_t test_vrgatherei16_vv_i16m2(vint16m2_t op1, vuint16m2_t op2,
   return vrgatherei16_vv_i16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgatherei16.vv.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1232,7 +1115,6 @@ vint16m4_t test_vrgatherei16_vv_i16m4(vint16m4_t op1, vuint16m4_t op2,
   return vrgatherei16_vv_i16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgatherei16.vv.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1243,7 +1125,6 @@ vint16m8_t test_vrgatherei16_vv_i16m8(vint16m8_t op1, vuint16m8_t op2,
   return vrgatherei16_vv_i16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1254,7 +1135,6 @@ vint32mf2_t test_vrgatherei16_vv_i32mf2(vint32mf2_t op1, vuint16mf4_t op2,
   return vrgatherei16_vv_i32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgatherei16.vv.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1265,7 +1145,6 @@ vint32m1_t test_vrgatherei16_vv_i32m1(vint32m1_t op1, vuint16mf2_t op2,
   return vrgatherei16_vv_i32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgatherei16.vv.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1276,7 +1155,6 @@ vint32m2_t test_vrgatherei16_vv_i32m2(vint32m2_t op1, vuint16m1_t op2,
   return vrgatherei16_vv_i32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgatherei16.vv.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1287,7 +1165,6 @@ vint32m4_t test_vrgatherei16_vv_i32m4(vint32m4_t op1, vuint16m2_t op2,
   return vrgatherei16_vv_i32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgatherei16.vv.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1298,7 +1175,6 @@ vint32m8_t test_vrgatherei16_vv_i32m8(vint32m8_t op1, vuint16m4_t op2,
   return vrgatherei16_vv_i32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgatherei16.vv.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1309,7 +1185,6 @@ vint64m1_t test_vrgatherei16_vv_i64m1(vint64m1_t op1, vuint16mf4_t op2,
   return vrgatherei16_vv_i64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgatherei16.vv.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1320,7 +1195,6 @@ vint64m2_t test_vrgatherei16_vv_i64m2(vint64m2_t op1, vuint16mf2_t op2,
   return vrgatherei16_vv_i64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgatherei16.vv.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1331,7 +1205,6 @@ vint64m4_t test_vrgatherei16_vv_i64m4(vint64m4_t op1, vuint16m1_t op2,
   return vrgatherei16_vv_i64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgatherei16.vv.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1342,7 +1215,6 @@ vint64m8_t test_vrgatherei16_vv_i64m8(vint64m8_t op1, vuint16m2_t op2,
   return vrgatherei16_vv_i64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1353,7 +1225,6 @@ vuint8mf8_t test_vrgatherei16_vv_u8mf8(vuint8mf8_t op1, vuint16mf4_t op2,
   return vrgatherei16_vv_u8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgatherei16.vv.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1364,7 +1235,6 @@ vuint8mf4_t test_vrgatherei16_vv_u8mf4(vuint8mf4_t op1, vuint16mf2_t op2,
   return vrgatherei16_vv_u8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgatherei16.vv.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1375,7 +1245,6 @@ vuint8mf2_t test_vrgatherei16_vv_u8mf2(vuint8mf2_t op1, vuint16m1_t op2,
   return vrgatherei16_vv_u8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1386,7 +1255,6 @@ vuint8m1_t test_vrgatherei16_vv_u8m1(vuint8m1_t op1, vuint16m2_t op2,
   return vrgatherei16_vv_u8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgatherei16.vv.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1397,7 +1265,6 @@ vuint8m2_t test_vrgatherei16_vv_u8m2(vuint8m2_t op1, vuint16m4_t op2,
   return vrgatherei16_vv_u8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgatherei16.vv.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1408,7 +1275,6 @@ vuint8m4_t test_vrgatherei16_vv_u8m4(vuint8m4_t op1, vuint16m8_t op2,
   return vrgatherei16_vv_u8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgatherei16.vv.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1419,7 +1285,6 @@ vuint16mf4_t test_vrgatherei16_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
   return vrgatherei16_vv_u16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgatherei16.vv.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1430,7 +1295,6 @@ vuint16mf2_t test_vrgatherei16_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
   return vrgatherei16_vv_u16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgatherei16.vv.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1441,7 +1305,6 @@ vuint16m1_t test_vrgatherei16_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2,
   return vrgatherei16_vv_u16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgatherei16.vv.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1452,7 +1315,6 @@ vuint16m2_t test_vrgatherei16_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2,
   return vrgatherei16_vv_u16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgatherei16.vv.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1463,7 +1325,6 @@ vuint16m4_t test_vrgatherei16_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2,
   return vrgatherei16_vv_u16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgatherei16.vv.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1474,7 +1335,6 @@ vuint16m8_t test_vrgatherei16_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2,
   return vrgatherei16_vv_u16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1485,7 +1345,6 @@ vuint32mf2_t test_vrgatherei16_vv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2,
   return vrgatherei16_vv_u32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgatherei16.vv.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1496,7 +1355,6 @@ vuint32m1_t test_vrgatherei16_vv_u32m1(vuint32m1_t op1, vuint16mf2_t op2,
   return vrgatherei16_vv_u32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgatherei16.vv.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1507,7 +1365,6 @@ vuint32m2_t test_vrgatherei16_vv_u32m2(vuint32m2_t op1, vuint16m1_t op2,
   return vrgatherei16_vv_u32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgatherei16.vv.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1518,7 +1375,6 @@ vuint32m4_t test_vrgatherei16_vv_u32m4(vuint32m4_t op1, vuint16m2_t op2,
   return vrgatherei16_vv_u32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgatherei16.vv.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1529,7 +1385,6 @@ vuint32m8_t test_vrgatherei16_vv_u32m8(vuint32m8_t op1, vuint16m4_t op2,
   return vrgatherei16_vv_u32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgatherei16.vv.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1540,7 +1395,6 @@ vuint64m1_t test_vrgatherei16_vv_u64m1(vuint64m1_t op1, vuint16mf4_t op2,
   return vrgatherei16_vv_u64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgatherei16.vv.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1551,7 +1405,6 @@ vuint64m2_t test_vrgatherei16_vv_u64m2(vuint64m2_t op1, vuint16mf2_t op2,
   return vrgatherei16_vv_u64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgatherei16.vv.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1562,7 +1415,6 @@ vuint64m4_t test_vrgatherei16_vv_u64m4(vuint64m4_t op1, vuint16m1_t op2,
   return vrgatherei16_vv_u64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgatherei16.vv.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1573,7 +1425,6 @@ vuint64m8_t test_vrgatherei16_vv_u64m8(vuint64m8_t op1, vuint16m2_t op2,
   return vrgatherei16_vv_u64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgatherei16.vv.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1584,7 +1435,6 @@ vfloat32mf2_t test_vrgatherei16_vv_f32mf2(vfloat32mf2_t op1, vuint16mf4_t op2,
   return vrgatherei16_vv_f32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vrgatherei16.vv.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1595,7 +1445,6 @@ vfloat32m1_t test_vrgatherei16_vv_f32m1(vfloat32m1_t op1, vuint16mf2_t op2,
   return vrgatherei16_vv_f32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vrgatherei16.vv.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1606,7 +1455,6 @@ vfloat32m2_t test_vrgatherei16_vv_f32m2(vfloat32m2_t op1, vuint16m1_t op2,
   return vrgatherei16_vv_f32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vrgatherei16.vv.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1617,7 +1465,6 @@ vfloat32m4_t test_vrgatherei16_vv_f32m4(vfloat32m4_t op1, vuint16m2_t op2,
   return vrgatherei16_vv_f32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vrgatherei16.vv.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1628,7 +1475,6 @@ vfloat32m8_t test_vrgatherei16_vv_f32m8(vfloat32m8_t op1, vuint16m4_t op2,
   return vrgatherei16_vv_f32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vrgatherei16.vv.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1639,7 +1485,6 @@ vfloat64m1_t test_vrgatherei16_vv_f64m1(vfloat64m1_t op1, vuint16mf4_t op2,
   return vrgatherei16_vv_f64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vrgatherei16.vv.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1650,7 +1495,6 @@ vfloat64m2_t test_vrgatherei16_vv_f64m2(vfloat64m2_t op1, vuint16mf2_t op2,
   return vrgatherei16_vv_f64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vrgatherei16.vv.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1661,7 +1505,6 @@ vfloat64m4_t test_vrgatherei16_vv_f64m4(vfloat64m4_t op1, vuint16m1_t op2,
   return vrgatherei16_vv_f64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vrgatherei16.vv.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1672,7 +1515,6 @@ vfloat64m8_t test_vrgatherei16_vv_f64m8(vfloat64m8_t op1, vuint16m2_t op2,
   return vrgatherei16_vv_f64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1684,7 +1526,6 @@ vint8mf8_t test_vrgather_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
   return vrgather_vv_i8mf8_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1695,7 +1536,6 @@ vint8mf8_t test_vrgather_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
   return vrgather_vx_i8mf8_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vv.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1707,7 +1547,6 @@ vint8mf4_t test_vrgather_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
   return vrgather_vv_i8mf4_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1718,7 +1557,6 @@ vint8mf4_t test_vrgather_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
   return vrgather_vx_i8mf4_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vv.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1730,7 +1568,6 @@ vint8mf2_t test_vrgather_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
   return vrgather_vv_i8mf2_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1741,7 +1578,6 @@ vint8mf2_t test_vrgather_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
   return vrgather_vx_i8mf2_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vv.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1752,7 +1588,6 @@ vint8m1_t test_vrgather_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
   return vrgather_vv_i8m1_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1763,7 +1598,6 @@ vint8m1_t test_vrgather_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
   return vrgather_vx_i8m1_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vv.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1774,7 +1608,6 @@ vint8m2_t test_vrgather_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
   return vrgather_vv_i8m2_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1785,7 +1618,6 @@ vint8m2_t test_vrgather_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
   return vrgather_vx_i8m2_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vv.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[INDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1796,7 +1628,6 @@ vint8m4_t test_vrgather_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
   return vrgather_vv_i8m4_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1807,7 +1638,6 @@ vint8m4_t test_vrgather_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
   return vrgather_vx_i8m4_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vv.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[INDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1818,7 +1648,6 @@ vint8m8_t test_vrgather_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
   return vrgather_vv_i8m8_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1829,7 +1658,6 @@ vint8m8_t test_vrgather_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
   return vrgather_vx_i8m8_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vv.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1841,7 +1669,6 @@ vint16mf4_t test_vrgather_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
   return vrgather_vv_i16mf4_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1853,7 +1680,6 @@ vint16mf4_t test_vrgather_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
   return vrgather_vx_i16mf4_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vv.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1865,7 +1691,6 @@ vint16mf2_t test_vrgather_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
   return vrgather_vv_i16mf2_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1877,7 +1702,6 @@ vint16mf2_t test_vrgather_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
   return vrgather_vx_i16mf2_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vv.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1889,7 +1713,6 @@ vint16m1_t test_vrgather_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
   return vrgather_vv_i16m1_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1900,7 +1723,6 @@ vint16m1_t test_vrgather_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
   return vrgather_vx_i16m1_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vv.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1912,7 +1734,6 @@ vint16m2_t test_vrgather_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
   return vrgather_vv_i16m2_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1923,7 +1744,6 @@ vint16m2_t test_vrgather_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
   return vrgather_vx_i16m2_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vv.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1935,7 +1755,6 @@ vint16m4_t test_vrgather_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
   return vrgather_vv_i16m4_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1946,7 +1765,6 @@ vint16m4_t test_vrgather_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
   return vrgather_vx_i16m4_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vv.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[INDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1958,7 +1776,6 @@ vint16m8_t test_vrgather_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
   return vrgather_vv_i16m8_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1969,7 +1786,6 @@ vint16m8_t test_vrgather_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
   return vrgather_vx_i16m8_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1981,7 +1797,6 @@ vint32mf2_t test_vrgather_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
   return vrgather_vv_i32mf2_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1993,7 +1808,6 @@ vint32mf2_t test_vrgather_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
   return vrgather_vx_i32mf2_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vv.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2005,7 +1819,6 @@ vint32m1_t test_vrgather_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
   return vrgather_vv_i32m1_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2016,7 +1829,6 @@ vint32m1_t test_vrgather_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
   return vrgather_vx_i32m1_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vv.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2028,7 +1840,6 @@ vint32m2_t test_vrgather_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
   return vrgather_vv_i32m2_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2039,7 +1850,6 @@ vint32m2_t test_vrgather_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
   return vrgather_vx_i32m2_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vv.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2051,7 +1861,6 @@ vint32m4_t test_vrgather_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
   return vrgather_vv_i32m4_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2062,7 +1871,6 @@ vint32m4_t test_vrgather_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
   return vrgather_vx_i32m4_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vv.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2074,7 +1882,6 @@ vint32m8_t test_vrgather_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
   return vrgather_vv_i32m8_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2085,7 +1892,6 @@ vint32m8_t test_vrgather_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
   return vrgather_vx_i32m8_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vv.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2097,7 +1903,6 @@ vint64m1_t test_vrgather_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
   return vrgather_vv_i64m1_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2108,7 +1913,6 @@ vint64m1_t test_vrgather_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
   return vrgather_vx_i64m1_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vv.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2120,7 +1924,6 @@ vint64m2_t test_vrgather_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
   return vrgather_vv_i64m2_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2131,7 +1934,6 @@ vint64m2_t test_vrgather_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
   return vrgather_vx_i64m2_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vv.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2143,7 +1945,6 @@ vint64m4_t test_vrgather_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
   return vrgather_vv_i64m4_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2154,7 +1955,6 @@ vint64m4_t test_vrgather_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
   return vrgather_vx_i64m4_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vv.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2166,7 +1966,6 @@ vint64m8_t test_vrgather_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
   return vrgather_vv_i64m8_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2177,7 +1976,6 @@ vint64m8_t test_vrgather_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
   return vrgather_vx_i64m8_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2189,7 +1987,6 @@ vuint8mf8_t test_vrgather_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
   return vrgather_vv_u8mf8_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2200,7 +1997,6 @@ vuint8mf8_t test_vrgather_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
   return vrgather_vx_u8mf8_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vv.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2212,7 +2008,6 @@ vuint8mf4_t test_vrgather_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
   return vrgather_vv_u8mf4_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2223,7 +2018,6 @@ vuint8mf4_t test_vrgather_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
   return vrgather_vx_u8mf4_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vv.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2235,7 +2029,6 @@ vuint8mf2_t test_vrgather_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
   return vrgather_vv_u8mf2_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2246,7 +2039,6 @@ vuint8mf2_t test_vrgather_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
   return vrgather_vx_u8mf2_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vv.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2258,7 +2050,6 @@ vuint8m1_t test_vrgather_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
   return vrgather_vv_u8m1_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2269,7 +2060,6 @@ vuint8m1_t test_vrgather_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
   return vrgather_vx_u8m1_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vv.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2281,7 +2071,6 @@ vuint8m2_t test_vrgather_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
   return vrgather_vv_u8m2_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2292,7 +2081,6 @@ vuint8m2_t test_vrgather_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
   return vrgather_vx_u8m2_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vv.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[INDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2304,7 +2092,6 @@ vuint8m4_t test_vrgather_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
   return vrgather_vv_u8m4_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2315,7 +2102,6 @@ vuint8m4_t test_vrgather_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
   return vrgather_vx_u8m4_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vv.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[INDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2327,7 +2113,6 @@ vuint8m8_t test_vrgather_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
   return vrgather_vv_u8m8_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2338,7 +2123,6 @@ vuint8m8_t test_vrgather_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
   return vrgather_vx_u8m8_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vv.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2350,7 +2134,6 @@ vuint16mf4_t test_vrgather_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
   return vrgather_vv_u16mf4_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2362,7 +2145,6 @@ vuint16mf4_t test_vrgather_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
   return vrgather_vx_u16mf4_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vv.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2374,7 +2156,6 @@ vuint16mf2_t test_vrgather_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
   return vrgather_vv_u16mf2_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2386,7 +2167,6 @@ vuint16mf2_t test_vrgather_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
   return vrgather_vx_u16mf2_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vv.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2398,7 +2178,6 @@ vuint16m1_t test_vrgather_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
   return vrgather_vv_u16m1_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2409,7 +2188,6 @@ vuint16m1_t test_vrgather_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
   return vrgather_vx_u16m1_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vv.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2421,7 +2199,6 @@ vuint16m2_t test_vrgather_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
   return vrgather_vv_u16m2_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2432,7 +2209,6 @@ vuint16m2_t test_vrgather_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
   return vrgather_vx_u16m2_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vv.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2444,7 +2220,6 @@ vuint16m4_t test_vrgather_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
   return vrgather_vv_u16m4_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2455,7 +2230,6 @@ vuint16m4_t test_vrgather_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
   return vrgather_vx_u16m4_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vv.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[INDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2467,7 +2241,6 @@ vuint16m8_t test_vrgather_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
   return vrgather_vv_u16m8_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2478,7 +2251,6 @@ vuint16m8_t test_vrgather_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
   return vrgather_vx_u16m8_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2490,7 +2262,6 @@ vuint32mf2_t test_vrgather_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
   return vrgather_vv_u32mf2_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2502,7 +2273,6 @@ vuint32mf2_t test_vrgather_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
   return vrgather_vx_u32mf2_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vv.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2514,7 +2284,6 @@ vuint32m1_t test_vrgather_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
   return vrgather_vv_u32m1_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2525,7 +2294,6 @@ vuint32m1_t test_vrgather_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
   return vrgather_vx_u32m1_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vv.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2537,7 +2305,6 @@ vuint32m2_t test_vrgather_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
   return vrgather_vv_u32m2_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2548,7 +2315,6 @@ vuint32m2_t test_vrgather_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
   return vrgather_vx_u32m2_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vv.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2560,7 +2326,6 @@ vuint32m4_t test_vrgather_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
   return vrgather_vv_u32m4_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2571,7 +2336,6 @@ vuint32m4_t test_vrgather_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
   return vrgather_vx_u32m4_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vv.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2583,7 +2347,6 @@ vuint32m8_t test_vrgather_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
   return vrgather_vv_u32m8_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2594,7 +2357,6 @@ vuint32m8_t test_vrgather_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
   return vrgather_vx_u32m8_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vv.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2606,7 +2368,6 @@ vuint64m1_t test_vrgather_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
   return vrgather_vv_u64m1_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2617,7 +2378,6 @@ vuint64m1_t test_vrgather_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
   return vrgather_vx_u64m1_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vv.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2629,7 +2389,6 @@ vuint64m2_t test_vrgather_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
   return vrgather_vv_u64m2_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2640,7 +2399,6 @@ vuint64m2_t test_vrgather_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
   return vrgather_vx_u64m2_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vv.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2652,7 +2410,6 @@ vuint64m4_t test_vrgather_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
   return vrgather_vv_u64m4_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2663,7 +2420,6 @@ vuint64m4_t test_vrgather_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
   return vrgather_vx_u64m4_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vv.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2675,7 +2431,6 @@ vuint64m8_t test_vrgather_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
   return vrgather_vv_u64m8_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2686,7 +2441,6 @@ vuint64m8_t test_vrgather_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
   return vrgather_vx_u64m8_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgather.vv.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2698,7 +2452,6 @@ vfloat32mf2_t test_vrgather_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
   return vrgather_vv_f32mf2_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2710,7 +2463,6 @@ vfloat32mf2_t test_vrgather_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
   return vrgather_vx_f32mf2_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vrgather.vv.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i32> [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2722,7 +2474,6 @@ vfloat32m1_t test_vrgather_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
   return vrgather_vv_f32m1_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2734,7 +2485,6 @@ vfloat32m1_t test_vrgather_vx_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
   return vrgather_vx_f32m1_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vrgather.vv.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i32> [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2746,7 +2496,6 @@ vfloat32m2_t test_vrgather_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
   return vrgather_vv_f32m2_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2758,7 +2507,6 @@ vfloat32m2_t test_vrgather_vx_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
   return vrgather_vx_f32m2_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vrgather.vv.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i32> [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2770,7 +2518,6 @@ vfloat32m4_t test_vrgather_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
   return vrgather_vv_f32m4_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2782,7 +2529,6 @@ vfloat32m4_t test_vrgather_vx_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
   return vrgather_vx_f32m4_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vrgather.vv.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i32> [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2794,7 +2540,6 @@ vfloat32m8_t test_vrgather_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
   return vrgather_vv_f32m8_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2806,7 +2551,6 @@ vfloat32m8_t test_vrgather_vx_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
   return vrgather_vx_f32m8_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vrgather.vv.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i64> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2818,7 +2562,6 @@ vfloat64m1_t test_vrgather_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
   return vrgather_vv_f64m1_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2830,7 +2573,6 @@ vfloat64m1_t test_vrgather_vx_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
   return vrgather_vx_f64m1_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vrgather.vv.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i64> [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2842,7 +2584,6 @@ vfloat64m2_t test_vrgather_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
   return vrgather_vv_f64m2_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2854,7 +2595,6 @@ vfloat64m2_t test_vrgather_vx_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
   return vrgather_vx_f64m2_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vrgather.vv.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i64> [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2866,7 +2606,6 @@ vfloat64m4_t test_vrgather_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
   return vrgather_vv_f64m4_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2878,7 +2617,6 @@ vfloat64m4_t test_vrgather_vx_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
   return vrgather_vx_f64m4_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vrgather.vv.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i64> [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2890,7 +2628,6 @@ vfloat64m8_t test_vrgather_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
   return vrgather_vv_f64m8_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2902,7 +2639,6 @@ vfloat64m8_t test_vrgather_vx_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
   return vrgather_vx_f64m8_m(mask, maskedoff, op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2914,7 +2650,6 @@ vint8mf8_t test_vrgatherei16_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
   return vrgatherei16_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2926,7 +2661,6 @@ vint8mf4_t test_vrgatherei16_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
   return vrgatherei16_vv_i8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2938,7 +2672,6 @@ vint8mf2_t test_vrgatherei16_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
   return vrgatherei16_vv_i8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2950,7 +2683,6 @@ vint8m1_t test_vrgatherei16_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
   return vrgatherei16_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2962,7 +2694,6 @@ vint8m2_t test_vrgatherei16_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
   return vrgatherei16_vv_i8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2974,7 +2705,6 @@ vint8m4_t test_vrgatherei16_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
   return vrgatherei16_vv_i8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2986,7 +2716,6 @@ vint16mf4_t test_vrgatherei16_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
   return vrgatherei16_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2998,7 +2727,6 @@ vint16mf2_t test_vrgatherei16_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
   return vrgatherei16_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3010,7 +2738,6 @@ vint16m1_t test_vrgatherei16_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
   return vrgatherei16_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3022,7 +2749,6 @@ vint16m2_t test_vrgatherei16_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
   return vrgatherei16_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3034,7 +2760,6 @@ vint16m4_t test_vrgatherei16_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
   return vrgatherei16_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3046,7 +2771,6 @@ vint16m8_t test_vrgatherei16_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
   return vrgatherei16_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3058,7 +2782,6 @@ vint32mf2_t test_vrgatherei16_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
   return vrgatherei16_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3070,7 +2793,6 @@ vint32m1_t test_vrgatherei16_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
   return vrgatherei16_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3082,7 +2804,6 @@ vint32m2_t test_vrgatherei16_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
   return vrgatherei16_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3094,7 +2815,6 @@ vint32m4_t test_vrgatherei16_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
   return vrgatherei16_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3106,7 +2826,6 @@ vint32m8_t test_vrgatherei16_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
   return vrgatherei16_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgatherei16.vv.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3118,7 +2837,6 @@ vint64m1_t test_vrgatherei16_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
   return vrgatherei16_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgatherei16.vv.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3130,7 +2848,6 @@ vint64m2_t test_vrgatherei16_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
   return vrgatherei16_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgatherei16.vv.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3142,7 +2859,6 @@ vint64m4_t test_vrgatherei16_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
   return vrgatherei16_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgatherei16.vv.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3154,7 +2870,6 @@ vint64m8_t test_vrgatherei16_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
   return vrgatherei16_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3166,7 +2881,6 @@ vuint8mf8_t test_vrgatherei16_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
   return vrgatherei16_vv_u8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3178,7 +2892,6 @@ vuint8mf4_t test_vrgatherei16_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
   return vrgatherei16_vv_u8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3190,7 +2903,6 @@ vuint8mf2_t test_vrgatherei16_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
   return vrgatherei16_vv_u8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3202,7 +2914,6 @@ vuint8m1_t test_vrgatherei16_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
   return vrgatherei16_vv_u8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3214,7 +2925,6 @@ vuint8m2_t test_vrgatherei16_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
   return vrgatherei16_vv_u8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3226,7 +2936,6 @@ vuint8m4_t test_vrgatherei16_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
   return vrgatherei16_vv_u8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3239,7 +2948,6 @@ vuint16mf4_t test_vrgatherei16_vv_u16mf4_m(vbool64_t mask,
   return vrgatherei16_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3252,7 +2960,6 @@ vuint16mf2_t test_vrgatherei16_vv_u16mf2_m(vbool32_t mask,
   return vrgatherei16_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3264,7 +2971,6 @@ vuint16m1_t test_vrgatherei16_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
   return vrgatherei16_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3276,7 +2982,6 @@ vuint16m2_t test_vrgatherei16_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
   return vrgatherei16_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3288,7 +2993,6 @@ vuint16m4_t test_vrgatherei16_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
   return vrgatherei16_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3300,7 +3004,6 @@ vuint16m8_t test_vrgatherei16_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
   return vrgatherei16_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3313,7 +3016,6 @@ vuint32mf2_t test_vrgatherei16_vv_u32mf2_m(vbool64_t mask,
   return vrgatherei16_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3325,7 +3027,6 @@ vuint32m1_t test_vrgatherei16_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
   return vrgatherei16_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3337,7 +3038,6 @@ vuint32m2_t test_vrgatherei16_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
   return vrgatherei16_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3349,7 +3049,6 @@ vuint32m4_t test_vrgatherei16_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
   return vrgatherei16_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3361,7 +3060,6 @@ vuint32m8_t test_vrgatherei16_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
   return vrgatherei16_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgatherei16.vv.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3373,7 +3071,6 @@ vuint64m1_t test_vrgatherei16_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
   return vrgatherei16_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgatherei16.vv.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3385,7 +3082,6 @@ vuint64m2_t test_vrgatherei16_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
   return vrgatherei16_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgatherei16.vv.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3397,7 +3093,6 @@ vuint64m4_t test_vrgatherei16_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
   return vrgatherei16_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgatherei16.vv.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3409,7 +3104,6 @@ vuint64m8_t test_vrgatherei16_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
   return vrgatherei16_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgatherei16.vv.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3422,7 +3116,6 @@ vfloat32mf2_t test_vrgatherei16_vv_f32mf2_m(vbool64_t mask,
   return vrgatherei16_vv_f32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vrgatherei16.vv.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3435,7 +3128,6 @@ vfloat32m1_t test_vrgatherei16_vv_f32m1_m(vbool32_t mask,
   return vrgatherei16_vv_f32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vrgatherei16.vv.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3448,7 +3140,6 @@ vfloat32m2_t test_vrgatherei16_vv_f32m2_m(vbool16_t mask,
   return vrgatherei16_vv_f32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vrgatherei16.vv.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3460,7 +3151,6 @@ vfloat32m4_t test_vrgatherei16_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
   return vrgatherei16_vv_f32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vrgatherei16.vv.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3472,7 +3162,6 @@ vfloat32m8_t test_vrgatherei16_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
   return vrgatherei16_vv_f32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vrgatherei16.vv.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3485,7 +3174,6 @@ vfloat64m1_t test_vrgatherei16_vv_f64m1_m(vbool64_t mask,
   return vrgatherei16_vv_f64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vrgatherei16.vv.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3498,7 +3186,6 @@ vfloat64m2_t test_vrgatherei16_vv_f64m2_m(vbool32_t mask,
   return vrgatherei16_vv_f64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vrgatherei16.vv.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3511,7 +3198,6 @@ vfloat64m4_t test_vrgatherei16_vv_f64m4_m(vbool16_t mask,
   return vrgatherei16_vv_f64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vrgatherei16.vv.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrsub.c
index 88848a0bd71cd..73cadfda74be7 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vrsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrsub.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint8mf8_t test_vrsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
   return vrsub_vx_i8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint8mf4_t test_vrsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
   return vrsub_vx_i8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint8mf2_t test_vrsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
   return vrsub_vx_i8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint8m1_t test_vrsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
   return vrsub_vx_i8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint8m2_t test_vrsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
   return vrsub_vx_i8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint8m4_t test_vrsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
   return vrsub_vx_i8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vint8m8_t test_vrsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
   return vrsub_vx_i8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vint16mf4_t test_vrsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
   return vrsub_vx_i16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vint16mf2_t test_vrsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
   return vrsub_vx_i16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vint16m1_t test_vrsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
   return vrsub_vx_i16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vint16m2_t test_vrsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
   return vrsub_vx_i16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vint16m4_t test_vrsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
   return vrsub_vx_i16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vint16m8_t test_vrsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
   return vrsub_vx_i16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vint32mf2_t test_vrsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
   return vrsub_vx_i32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vint32m1_t test_vrsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
   return vrsub_vx_i32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vint32m2_t test_vrsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
   return vrsub_vx_i32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vint32m4_t test_vrsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
   return vrsub_vx_i32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vint32m8_t test_vrsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
   return vrsub_vx_i32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vint64m1_t test_vrsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
   return vrsub_vx_i64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vint64m2_t test_vrsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
   return vrsub_vx_i64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vint64m4_t test_vrsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
   return vrsub_vx_i64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vint64m8_t test_vrsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
   return vrsub_vx_i64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vuint8mf8_t test_vrsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
   return vrsub_vx_u8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vuint8mf4_t test_vrsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
   return vrsub_vx_u8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vuint8mf2_t test_vrsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
   return vrsub_vx_u8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vuint8m1_t test_vrsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
   return vrsub_vx_u8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vuint8m2_t test_vrsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
   return vrsub_vx_u8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vuint8m4_t test_vrsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
   return vrsub_vx_u8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vuint8m8_t test_vrsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
   return vrsub_vx_u8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -304,7 +274,6 @@ vuint16mf4_t test_vrsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
   return vrsub_vx_u16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -314,7 +283,6 @@ vuint16mf2_t test_vrsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
   return vrsub_vx_u16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -324,7 +292,6 @@ vuint16m1_t test_vrsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
   return vrsub_vx_u16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -334,7 +301,6 @@ vuint16m2_t test_vrsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
   return vrsub_vx_u16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -344,7 +310,6 @@ vuint16m4_t test_vrsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
   return vrsub_vx_u16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -354,7 +319,6 @@ vuint16m8_t test_vrsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
   return vrsub_vx_u16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -364,7 +328,6 @@ vuint32mf2_t test_vrsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
   return vrsub_vx_u32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -374,7 +337,6 @@ vuint32m1_t test_vrsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
   return vrsub_vx_u32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -384,7 +346,6 @@ vuint32m2_t test_vrsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vrsub_vx_u32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -394,7 +355,6 @@ vuint32m4_t test_vrsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vrsub_vx_u32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -404,7 +364,6 @@ vuint32m8_t test_vrsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
   return vrsub_vx_u32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -414,7 +373,6 @@ vuint64m1_t test_vrsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
   return vrsub_vx_u64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -424,7 +382,6 @@ vuint64m2_t test_vrsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
   return vrsub_vx_u64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -434,7 +391,6 @@ vuint64m4_t test_vrsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
   return vrsub_vx_u64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -444,7 +400,6 @@ vuint64m8_t test_vrsub_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
   return vrsub_vx_u64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -454,7 +409,6 @@ vint8mf8_t test_vrsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_
   return vrsub_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -464,7 +418,6 @@ vint8mf4_t test_vrsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_
   return vrsub_vx_i8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -474,7 +427,6 @@ vint8mf2_t test_vrsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_
   return vrsub_vx_i8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -484,7 +436,6 @@ vint8m1_t test_vrsub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1
   return vrsub_vx_i8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -494,7 +445,6 @@ vint8m2_t test_vrsub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1
   return vrsub_vx_i8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -504,7 +454,6 @@ vint8m4_t test_vrsub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1
   return vrsub_vx_i8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -514,7 +463,6 @@ vint8m8_t test_vrsub_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1
   return vrsub_vx_i8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -524,7 +472,6 @@ vint16mf4_t test_vrsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16
   return vrsub_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -534,7 +481,6 @@ vint16mf2_t test_vrsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16
   return vrsub_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -544,7 +490,6 @@ vint16m1_t test_vrsub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_
   return vrsub_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -554,7 +499,6 @@ vint16m2_t test_vrsub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
   return vrsub_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -564,7 +508,6 @@ vint16m4_t test_vrsub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
   return vrsub_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -574,7 +517,6 @@ vint16m8_t test_vrsub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
   return vrsub_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -584,7 +526,6 @@ vint32mf2_t test_vrsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32
   return vrsub_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -594,7 +535,6 @@ vint32m1_t test_vrsub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_
   return vrsub_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -604,7 +544,6 @@ vint32m2_t test_vrsub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_
   return vrsub_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -614,7 +553,6 @@ vint32m4_t test_vrsub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
   return vrsub_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -624,7 +562,6 @@ vint32m8_t test_vrsub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
   return vrsub_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -634,7 +571,6 @@ vint64m1_t test_vrsub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_
   return vrsub_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -644,7 +580,6 @@ vint64m2_t test_vrsub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_
   return vrsub_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -654,7 +589,6 @@ vint64m4_t test_vrsub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_
   return vrsub_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -664,7 +598,6 @@ vint64m8_t test_vrsub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
   return vrsub_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -674,7 +607,6 @@ vuint8mf8_t test_vrsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8m
   return vrsub_vx_u8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -684,7 +616,6 @@ vuint8mf4_t test_vrsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8m
   return vrsub_vx_u8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -694,7 +625,6 @@ vuint8mf2_t test_vrsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8m
   return vrsub_vx_u8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -704,7 +634,6 @@ vuint8m1_t test_vrsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
   return vrsub_vx_u8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -714,7 +643,6 @@ vuint8m2_t test_vrsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
   return vrsub_vx_u8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -724,7 +652,6 @@ vuint8m4_t test_vrsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
   return vrsub_vx_u8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -734,7 +661,6 @@ vuint8m8_t test_vrsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
   return vrsub_vx_u8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -744,7 +670,6 @@ vuint16mf4_t test_vrsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuin
   return vrsub_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -754,7 +679,6 @@ vuint16mf2_t test_vrsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuin
   return vrsub_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -764,7 +688,6 @@ vuint16m1_t test_vrsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16
   return vrsub_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -774,7 +697,6 @@ vuint16m2_t test_vrsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m
   return vrsub_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -784,7 +706,6 @@ vuint16m4_t test_vrsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m
   return vrsub_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -794,7 +715,6 @@ vuint16m8_t test_vrsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m
   return vrsub_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -804,7 +724,6 @@ vuint32mf2_t test_vrsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuin
   return vrsub_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -814,7 +733,6 @@ vuint32m1_t test_vrsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32
   return vrsub_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -824,7 +742,6 @@ vuint32m2_t test_vrsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32
   return vrsub_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -834,7 +751,6 @@ vuint32m4_t test_vrsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m
   return vrsub_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -844,7 +760,6 @@ vuint32m8_t test_vrsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m
   return vrsub_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -854,7 +769,6 @@ vuint64m1_t test_vrsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64
   return vrsub_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -864,7 +778,6 @@ vuint64m2_t test_vrsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64
   return vrsub_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -874,7 +787,6 @@ vuint64m4_t test_vrsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64
   return vrsub_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

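Each deleted line above is a bare "//" separator between consecutive test entries; after the cleanup a test's CHECK block follows the previous function's closing brace directly. A minimal sketch of what one vrsub.c entry reads like after this change, reconstructed from the hunks above (the trailing "ret" CHECK line is an assumption, since it sits in context elided from the diff):

  #include <riscv_vector.h>

  // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf8(
  // CHECK-RV64-NEXT:  entry:
  // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
  // NOTE: the next CHECK line is an assumption; it lies in context elided from the diff above.
  // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
  vint8mf8_t test_vrsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
    return vrsub_vx_i8mf8(op1, op2, vl);
  }

  // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf4(
  // (the next entry follows immediately, with no bare "//" separator in between)
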
diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsadd.c
index 9ab64dcbf7d35..5b843e1b3b299 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsadd.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint8mf8_t test_vsadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
   return vsadd_vv_i8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint8mf8_t test_vsadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
   return vsadd_vx_i8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint8mf4_t test_vsadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
   return vsadd_vv_i8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint8mf4_t test_vsadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
   return vsadd_vx_i8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint8mf2_t test_vsadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
   return vsadd_vv_i8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint8mf2_t test_vsadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
   return vsadd_vx_i8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vint8m1_t test_vsadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
   return vsadd_vv_i8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vint8m1_t test_vsadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
   return vsadd_vx_i8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vint8m2_t test_vsadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
   return vsadd_vv_i8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vint8m2_t test_vsadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
   return vsadd_vx_i8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vint8m4_t test_vsadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
   return vsadd_vv_i8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vint8m4_t test_vsadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
   return vsadd_vx_i8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vint8m8_t test_vsadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
   return vsadd_vv_i8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vint8m8_t test_vsadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
   return vsadd_vx_i8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vint16mf4_t test_vsadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
   return vsadd_vv_i16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vint16mf4_t test_vsadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
   return vsadd_vx_i16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vint16mf2_t test_vsadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
   return vsadd_vv_i16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vint16mf2_t test_vsadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
   return vsadd_vx_i16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vint16m1_t test_vsadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
   return vsadd_vv_i16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vint16m1_t test_vsadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
   return vsadd_vx_i16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vint16m2_t test_vsadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
   return vsadd_vv_i16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vint16m2_t test_vsadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
   return vsadd_vx_i16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vint16m4_t test_vsadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
   return vsadd_vv_i16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vint16m4_t test_vsadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
   return vsadd_vx_i16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vint16m8_t test_vsadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
   return vsadd_vv_i16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vint16m8_t test_vsadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
   return vsadd_vx_i16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vint32mf2_t test_vsadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
   return vsadd_vv_i32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vint32mf2_t test_vsadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
   return vsadd_vx_i32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vint32m1_t test_vsadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
   return vsadd_vv_i32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -304,7 +274,6 @@ vint32m1_t test_vsadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
   return vsadd_vx_i32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -314,7 +283,6 @@ vint32m2_t test_vsadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
   return vsadd_vv_i32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -324,7 +292,6 @@ vint32m2_t test_vsadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
   return vsadd_vx_i32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -334,7 +301,6 @@ vint32m4_t test_vsadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
   return vsadd_vv_i32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -344,7 +310,6 @@ vint32m4_t test_vsadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
   return vsadd_vx_i32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -354,7 +319,6 @@ vint32m8_t test_vsadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
   return vsadd_vv_i32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -364,7 +328,6 @@ vint32m8_t test_vsadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
   return vsadd_vx_i32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -374,7 +337,6 @@ vint64m1_t test_vsadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
   return vsadd_vv_i64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -384,7 +346,6 @@ vint64m1_t test_vsadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
   return vsadd_vx_i64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -394,7 +355,6 @@ vint64m2_t test_vsadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
   return vsadd_vv_i64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -404,7 +364,6 @@ vint64m2_t test_vsadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
   return vsadd_vx_i64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -414,7 +373,6 @@ vint64m4_t test_vsadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
   return vsadd_vv_i64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -424,7 +382,6 @@ vint64m4_t test_vsadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
   return vsadd_vx_i64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -434,7 +391,6 @@ vint64m8_t test_vsadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
   return vsadd_vv_i64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -444,7 +400,6 @@ vint64m8_t test_vsadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
   return vsadd_vx_i64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -454,7 +409,6 @@ vuint8mf8_t test_vsaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
   return vsaddu_vv_u8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -464,7 +418,6 @@ vuint8mf8_t test_vsaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
   return vsaddu_vx_u8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsaddu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -474,7 +427,6 @@ vuint8mf4_t test_vsaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
   return vsaddu_vv_u8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsaddu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -484,7 +436,6 @@ vuint8mf4_t test_vsaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
   return vsaddu_vx_u8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsaddu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -494,7 +445,6 @@ vuint8mf2_t test_vsaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
   return vsaddu_vv_u8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsaddu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -504,7 +454,6 @@ vuint8mf2_t test_vsaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
   return vsaddu_vx_u8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsaddu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -514,7 +463,6 @@ vuint8m1_t test_vsaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vsaddu_vv_u8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsaddu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -524,7 +472,6 @@ vuint8m1_t test_vsaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
   return vsaddu_vx_u8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsaddu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -534,7 +481,6 @@ vuint8m2_t test_vsaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vsaddu_vv_u8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsaddu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -544,7 +490,6 @@ vuint8m2_t test_vsaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
   return vsaddu_vx_u8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsaddu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -554,7 +499,6 @@ vuint8m4_t test_vsaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vsaddu_vv_u8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsaddu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -564,7 +508,6 @@ vuint8m4_t test_vsaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
   return vsaddu_vx_u8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsaddu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -574,7 +517,6 @@ vuint8m8_t test_vsaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
   return vsaddu_vv_u8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsaddu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -584,7 +526,6 @@ vuint8m8_t test_vsaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
   return vsaddu_vx_u8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsaddu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -595,7 +536,6 @@ vuint16mf4_t test_vsaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
   return vsaddu_vv_u16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsaddu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -605,7 +545,6 @@ vuint16mf4_t test_vsaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
   return vsaddu_vx_u16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsaddu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -616,7 +555,6 @@ vuint16mf2_t test_vsaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
   return vsaddu_vv_u16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsaddu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -626,7 +564,6 @@ vuint16mf2_t test_vsaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
   return vsaddu_vx_u16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsaddu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -636,7 +573,6 @@ vuint16m1_t test_vsaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
   return vsaddu_vv_u16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsaddu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -646,7 +582,6 @@ vuint16m1_t test_vsaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
   return vsaddu_vx_u16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsaddu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -656,7 +591,6 @@ vuint16m2_t test_vsaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vsaddu_vv_u16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsaddu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -666,7 +600,6 @@ vuint16m2_t test_vsaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
   return vsaddu_vx_u16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsaddu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -676,7 +609,6 @@ vuint16m4_t test_vsaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vsaddu_vv_u16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsaddu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -686,7 +618,6 @@ vuint16m4_t test_vsaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
   return vsaddu_vx_u16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsaddu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -696,7 +627,6 @@ vuint16m8_t test_vsaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return vsaddu_vv_u16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsaddu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -706,7 +636,6 @@ vuint16m8_t test_vsaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
   return vsaddu_vx_u16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -717,7 +646,6 @@ vuint32mf2_t test_vsaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
   return vsaddu_vv_u32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -727,7 +655,6 @@ vuint32mf2_t test_vsaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
   return vsaddu_vx_u32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsaddu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -737,7 +664,6 @@ vuint32m1_t test_vsaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
   return vsaddu_vv_u32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsaddu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -747,7 +673,6 @@ vuint32m1_t test_vsaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
   return vsaddu_vx_u32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsaddu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -757,7 +682,6 @@ vuint32m2_t test_vsaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
   return vsaddu_vv_u32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsaddu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -767,7 +691,6 @@ vuint32m2_t test_vsaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vsaddu_vx_u32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsaddu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -777,7 +700,6 @@ vuint32m4_t test_vsaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vsaddu_vv_u32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsaddu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -787,7 +709,6 @@ vuint32m4_t test_vsaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vsaddu_vx_u32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsaddu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -797,7 +718,6 @@ vuint32m8_t test_vsaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
   return vsaddu_vv_u32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsaddu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -807,7 +727,6 @@ vuint32m8_t test_vsaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
   return vsaddu_vx_u32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -817,7 +736,6 @@ vuint64m1_t test_vsaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
   return vsaddu_vv_u64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -827,7 +745,6 @@ vuint64m1_t test_vsaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
   return vsaddu_vx_u64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -837,7 +754,6 @@ vuint64m2_t test_vsaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
   return vsaddu_vv_u64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -847,7 +763,6 @@ vuint64m2_t test_vsaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
   return vsaddu_vx_u64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -857,7 +772,6 @@ vuint64m4_t test_vsaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
   return vsaddu_vv_u64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -867,7 +781,6 @@ vuint64m4_t test_vsaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
   return vsaddu_vx_u64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -877,7 +790,6 @@ vuint64m8_t test_vsaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
   return vsaddu_vv_u64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -887,7 +799,6 @@ vuint64m8_t test_vsaddu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
   return vsaddu_vx_u64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -898,7 +809,6 @@ vint8mf8_t test_vsadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
   return vsadd_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -909,7 +819,6 @@ vint8mf8_t test_vsadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
   return vsadd_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -920,7 +829,6 @@ vint8mf4_t test_vsadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
   return vsadd_vv_i8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsadd.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -931,7 +839,6 @@ vint8mf4_t test_vsadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
   return vsadd_vx_i8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -942,7 +849,6 @@ vint8mf2_t test_vsadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
   return vsadd_vv_i8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsadd.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -953,7 +859,6 @@ vint8mf2_t test_vsadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
   return vsadd_vx_i8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -964,7 +869,6 @@ vint8m1_t test_vsadd_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
   return vsadd_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsadd.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -975,7 +879,6 @@ vint8m1_t test_vsadd_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
   return vsadd_vx_i8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -986,7 +889,6 @@ vint8m2_t test_vsadd_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
   return vsadd_vv_i8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsadd.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -997,7 +899,6 @@ vint8m2_t test_vsadd_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
   return vsadd_vx_i8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1008,7 +909,6 @@ vint8m4_t test_vsadd_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
   return vsadd_vv_i8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsadd.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1019,7 +919,6 @@ vint8m4_t test_vsadd_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
   return vsadd_vx_i8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1030,7 +929,6 @@ vint8m8_t test_vsadd_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
   return vsadd_vv_i8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsadd.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1041,7 +939,6 @@ vint8m8_t test_vsadd_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
   return vsadd_vx_i8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1053,7 +950,6 @@ vint16mf4_t test_vsadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
   return vsadd_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsadd.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1064,7 +960,6 @@ vint16mf4_t test_vsadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
   return vsadd_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1076,7 +971,6 @@ vint16mf2_t test_vsadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
   return vsadd_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsadd.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1087,7 +981,6 @@ vint16mf2_t test_vsadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
   return vsadd_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1098,7 +991,6 @@ vint16m1_t test_vsadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
   return vsadd_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsadd.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1109,7 +1001,6 @@ vint16m1_t test_vsadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
   return vsadd_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1120,7 +1011,6 @@ vint16m2_t test_vsadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
   return vsadd_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsadd.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1131,7 +1021,6 @@ vint16m2_t test_vsadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
   return vsadd_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1142,7 +1031,6 @@ vint16m4_t test_vsadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
   return vsadd_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsadd.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1153,7 +1041,6 @@ vint16m4_t test_vsadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
   return vsadd_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1164,7 +1051,6 @@ vint16m8_t test_vsadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
   return vsadd_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsadd.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1175,7 +1061,6 @@ vint16m8_t test_vsadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
   return vsadd_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1187,7 +1072,6 @@ vint32mf2_t test_vsadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
   return vsadd_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsadd.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1198,7 +1082,6 @@ vint32mf2_t test_vsadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
   return vsadd_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1209,7 +1092,6 @@ vint32m1_t test_vsadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
   return vsadd_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsadd.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1220,7 +1102,6 @@ vint32m1_t test_vsadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
   return vsadd_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1231,7 +1112,6 @@ vint32m2_t test_vsadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
   return vsadd_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsadd.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1242,7 +1122,6 @@ vint32m2_t test_vsadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
   return vsadd_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1253,7 +1132,6 @@ vint32m4_t test_vsadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
   return vsadd_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsadd.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1264,7 +1142,6 @@ vint32m4_t test_vsadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
   return vsadd_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1275,7 +1152,6 @@ vint32m8_t test_vsadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
   return vsadd_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsadd.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1286,7 +1162,6 @@ vint32m8_t test_vsadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
   return vsadd_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1297,7 +1172,6 @@ vint64m1_t test_vsadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
   return vsadd_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1308,7 +1182,6 @@ vint64m1_t test_vsadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
   return vsadd_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1319,7 +1192,6 @@ vint64m2_t test_vsadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
   return vsadd_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsadd.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1330,7 +1202,6 @@ vint64m2_t test_vsadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
   return vsadd_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1341,7 +1212,6 @@ vint64m4_t test_vsadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
   return vsadd_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsadd.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1352,7 +1222,6 @@ vint64m4_t test_vsadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
   return vsadd_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1363,7 +1232,6 @@ vint64m8_t test_vsadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
   return vsadd_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsadd.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1374,7 +1242,6 @@ vint64m8_t test_vsadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
   return vsadd_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1386,7 +1253,6 @@ vuint8mf8_t test_vsaddu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
   return vsaddu_vv_u8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsaddu.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1397,7 +1263,6 @@ vuint8mf8_t test_vsaddu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
   return vsaddu_vx_u8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1409,7 +1274,6 @@ vuint8mf4_t test_vsaddu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
   return vsaddu_vv_u8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsaddu.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1420,7 +1284,6 @@ vuint8mf4_t test_vsaddu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
   return vsaddu_vx_u8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1432,7 +1295,6 @@ vuint8mf2_t test_vsaddu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
   return vsaddu_vv_u8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsaddu.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1443,7 +1305,6 @@ vuint8mf2_t test_vsaddu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
   return vsaddu_vx_u8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1454,7 +1315,6 @@ vuint8m1_t test_vsaddu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
   return vsaddu_vv_u8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsaddu.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1465,7 +1325,6 @@ vuint8m1_t test_vsaddu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
   return vsaddu_vx_u8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1476,7 +1335,6 @@ vuint8m2_t test_vsaddu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
   return vsaddu_vv_u8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsaddu.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1487,7 +1345,6 @@ vuint8m2_t test_vsaddu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
   return vsaddu_vx_u8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1498,7 +1355,6 @@ vuint8m4_t test_vsaddu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
   return vsaddu_vv_u8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsaddu.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1509,7 +1365,6 @@ vuint8m4_t test_vsaddu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
   return vsaddu_vx_u8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1520,7 +1375,6 @@ vuint8m8_t test_vsaddu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
   return vsaddu_vv_u8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsaddu.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1531,7 +1385,6 @@ vuint8m8_t test_vsaddu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
   return vsaddu_vx_u8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1543,7 +1396,6 @@ vuint16mf4_t test_vsaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
   return vsaddu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsaddu.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1555,7 +1407,6 @@ vuint16mf4_t test_vsaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
   return vsaddu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1567,7 +1418,6 @@ vuint16mf2_t test_vsaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
   return vsaddu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsaddu.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1579,7 +1429,6 @@ vuint16mf2_t test_vsaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
   return vsaddu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1591,7 +1440,6 @@ vuint16m1_t test_vsaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
   return vsaddu_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsaddu.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1602,7 +1450,6 @@ vuint16m1_t test_vsaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
   return vsaddu_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1614,7 +1461,6 @@ vuint16m2_t test_vsaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
   return vsaddu_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsaddu.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1625,7 +1471,6 @@ vuint16m2_t test_vsaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
   return vsaddu_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1637,7 +1482,6 @@ vuint16m4_t test_vsaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
   return vsaddu_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsaddu.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1648,7 +1492,6 @@ vuint16m4_t test_vsaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
   return vsaddu_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1660,7 +1503,6 @@ vuint16m8_t test_vsaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
   return vsaddu_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsaddu.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1671,7 +1513,6 @@ vuint16m8_t test_vsaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
   return vsaddu_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1683,7 +1524,6 @@ vuint32mf2_t test_vsaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
   return vsaddu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1695,7 +1535,6 @@ vuint32mf2_t test_vsaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
   return vsaddu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1707,7 +1546,6 @@ vuint32m1_t test_vsaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
   return vsaddu_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsaddu.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1718,7 +1556,6 @@ vuint32m1_t test_vsaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
   return vsaddu_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1730,7 +1567,6 @@ vuint32m2_t test_vsaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
   return vsaddu_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsaddu.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1741,7 +1577,6 @@ vuint32m2_t test_vsaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
   return vsaddu_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1753,7 +1588,6 @@ vuint32m4_t test_vsaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
   return vsaddu_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsaddu.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1764,7 +1598,6 @@ vuint32m4_t test_vsaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
   return vsaddu_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1776,7 +1609,6 @@ vuint32m8_t test_vsaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
   return vsaddu_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsaddu.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1787,7 +1619,6 @@ vuint32m8_t test_vsaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
   return vsaddu_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1799,7 +1630,6 @@ vuint64m1_t test_vsaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
   return vsaddu_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1810,7 +1640,6 @@ vuint64m1_t test_vsaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
   return vsaddu_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsaddu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1822,7 +1651,6 @@ vuint64m2_t test_vsaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
   return vsaddu_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsaddu.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1833,7 +1661,6 @@ vuint64m2_t test_vsaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
   return vsaddu_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsaddu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1845,7 +1672,6 @@ vuint64m4_t test_vsaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
   return vsaddu_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsaddu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1856,7 +1682,6 @@ vuint64m4_t test_vsaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
   return vsaddu_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsaddu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1868,7 +1693,6 @@ vuint64m8_t test_vsaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
   return vsaddu_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsaddu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsbc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsbc.c
index 10a4e76227ff0..00b3660c19a75 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsbc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsbc.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vint8mf8_t test_vsbc_vvm_i8mf8(vint8mf8_t op1, vint8mf8_t op2,
   return vsbc_vvm_i8mf8(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -26,7 +24,6 @@ vint8mf8_t test_vsbc_vxm_i8mf8(vint8mf8_t op1, int8_t op2, vbool64_t borrowin,
   return vsbc_vxm_i8mf8(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -37,7 +34,6 @@ vint8mf4_t test_vsbc_vvm_i8mf4(vint8mf4_t op1, vint8mf4_t op2,
   return vsbc_vvm_i8mf4(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -48,7 +44,6 @@ vint8mf4_t test_vsbc_vxm_i8mf4(vint8mf4_t op1, int8_t op2, vbool32_t borrowin,
   return vsbc_vxm_i8mf4(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -59,7 +54,6 @@ vint8mf2_t test_vsbc_vvm_i8mf2(vint8mf2_t op1, vint8mf2_t op2,
   return vsbc_vvm_i8mf2(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -70,7 +64,6 @@ vint8mf2_t test_vsbc_vxm_i8mf2(vint8mf2_t op1, int8_t op2, vbool16_t borrowin,
   return vsbc_vxm_i8mf2(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -81,7 +74,6 @@ vint8m1_t test_vsbc_vvm_i8m1(vint8m1_t op1, vint8m1_t op2, vbool8_t borrowin,
   return vsbc_vvm_i8m1(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -92,7 +84,6 @@ vint8m1_t test_vsbc_vxm_i8m1(vint8m1_t op1, int8_t op2, vbool8_t borrowin,
   return vsbc_vxm_i8m1(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -103,7 +94,6 @@ vint8m2_t test_vsbc_vvm_i8m2(vint8m2_t op1, vint8m2_t op2, vbool4_t borrowin,
   return vsbc_vvm_i8m2(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -114,7 +104,6 @@ vint8m2_t test_vsbc_vxm_i8m2(vint8m2_t op1, int8_t op2, vbool4_t borrowin,
   return vsbc_vxm_i8m2(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -125,7 +114,6 @@ vint8m4_t test_vsbc_vvm_i8m4(vint8m4_t op1, vint8m4_t op2, vbool2_t borrowin,
   return vsbc_vvm_i8m4(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -136,7 +124,6 @@ vint8m4_t test_vsbc_vxm_i8m4(vint8m4_t op1, int8_t op2, vbool2_t borrowin,
   return vsbc_vxm_i8m4(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -147,7 +134,6 @@ vint8m8_t test_vsbc_vvm_i8m8(vint8m8_t op1, vint8m8_t op2, vbool1_t borrowin,
   return vsbc_vvm_i8m8(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -158,7 +144,6 @@ vint8m8_t test_vsbc_vxm_i8m8(vint8m8_t op1, int8_t op2, vbool1_t borrowin,
   return vsbc_vxm_i8m8(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -169,7 +154,6 @@ vint16mf4_t test_vsbc_vvm_i16mf4(vint16mf4_t op1, vint16mf4_t op2,
   return vsbc_vvm_i16mf4(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -180,7 +164,6 @@ vint16mf4_t test_vsbc_vxm_i16mf4(vint16mf4_t op1, int16_t op2,
   return vsbc_vxm_i16mf4(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -191,7 +174,6 @@ vint16mf2_t test_vsbc_vvm_i16mf2(vint16mf2_t op1, vint16mf2_t op2,
   return vsbc_vvm_i16mf2(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -202,7 +184,6 @@ vint16mf2_t test_vsbc_vxm_i16mf2(vint16mf2_t op1, int16_t op2,
   return vsbc_vxm_i16mf2(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -213,7 +194,6 @@ vint16m1_t test_vsbc_vvm_i16m1(vint16m1_t op1, vint16m1_t op2,
   return vsbc_vvm_i16m1(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -224,7 +204,6 @@ vint16m1_t test_vsbc_vxm_i16m1(vint16m1_t op1, int16_t op2, vbool16_t borrowin,
   return vsbc_vxm_i16m1(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -235,7 +214,6 @@ vint16m2_t test_vsbc_vvm_i16m2(vint16m2_t op1, vint16m2_t op2,
   return vsbc_vvm_i16m2(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -246,7 +224,6 @@ vint16m2_t test_vsbc_vxm_i16m2(vint16m2_t op1, int16_t op2, vbool8_t borrowin,
   return vsbc_vxm_i16m2(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -257,7 +234,6 @@ vint16m4_t test_vsbc_vvm_i16m4(vint16m4_t op1, vint16m4_t op2,
   return vsbc_vvm_i16m4(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -268,7 +244,6 @@ vint16m4_t test_vsbc_vxm_i16m4(vint16m4_t op1, int16_t op2, vbool4_t borrowin,
   return vsbc_vxm_i16m4(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -279,7 +254,6 @@ vint16m8_t test_vsbc_vvm_i16m8(vint16m8_t op1, vint16m8_t op2,
   return vsbc_vvm_i16m8(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -290,7 +264,6 @@ vint16m8_t test_vsbc_vxm_i16m8(vint16m8_t op1, int16_t op2, vbool2_t borrowin,
   return vsbc_vxm_i16m8(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -301,7 +274,6 @@ vint32mf2_t test_vsbc_vvm_i32mf2(vint32mf2_t op1, vint32mf2_t op2,
   return vsbc_vvm_i32mf2(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -312,7 +284,6 @@ vint32mf2_t test_vsbc_vxm_i32mf2(vint32mf2_t op1, int32_t op2,
   return vsbc_vxm_i32mf2(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -323,7 +294,6 @@ vint32m1_t test_vsbc_vvm_i32m1(vint32m1_t op1, vint32m1_t op2,
   return vsbc_vvm_i32m1(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -334,7 +304,6 @@ vint32m1_t test_vsbc_vxm_i32m1(vint32m1_t op1, int32_t op2, vbool32_t borrowin,
   return vsbc_vxm_i32m1(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -345,7 +314,6 @@ vint32m2_t test_vsbc_vvm_i32m2(vint32m2_t op1, vint32m2_t op2,
   return vsbc_vvm_i32m2(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -356,7 +324,6 @@ vint32m2_t test_vsbc_vxm_i32m2(vint32m2_t op1, int32_t op2, vbool16_t borrowin,
   return vsbc_vxm_i32m2(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -367,7 +334,6 @@ vint32m4_t test_vsbc_vvm_i32m4(vint32m4_t op1, vint32m4_t op2,
   return vsbc_vvm_i32m4(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -378,7 +344,6 @@ vint32m4_t test_vsbc_vxm_i32m4(vint32m4_t op1, int32_t op2, vbool8_t borrowin,
   return vsbc_vxm_i32m4(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -389,7 +354,6 @@ vint32m8_t test_vsbc_vvm_i32m8(vint32m8_t op1, vint32m8_t op2,
   return vsbc_vvm_i32m8(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -400,7 +364,6 @@ vint32m8_t test_vsbc_vxm_i32m8(vint32m8_t op1, int32_t op2, vbool4_t borrowin,
   return vsbc_vxm_i32m8(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -411,7 +374,6 @@ vint64m1_t test_vsbc_vvm_i64m1(vint64m1_t op1, vint64m1_t op2,
   return vsbc_vvm_i64m1(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -422,7 +384,6 @@ vint64m1_t test_vsbc_vxm_i64m1(vint64m1_t op1, int64_t op2, vbool64_t borrowin,
   return vsbc_vxm_i64m1(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -433,7 +394,6 @@ vint64m2_t test_vsbc_vvm_i64m2(vint64m2_t op1, vint64m2_t op2,
   return vsbc_vvm_i64m2(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -444,7 +404,6 @@ vint64m2_t test_vsbc_vxm_i64m2(vint64m2_t op1, int64_t op2, vbool32_t borrowin,
   return vsbc_vxm_i64m2(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -455,7 +414,6 @@ vint64m4_t test_vsbc_vvm_i64m4(vint64m4_t op1, vint64m4_t op2,
   return vsbc_vvm_i64m4(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -466,7 +424,6 @@ vint64m4_t test_vsbc_vxm_i64m4(vint64m4_t op1, int64_t op2, vbool16_t borrowin,
   return vsbc_vxm_i64m4(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -477,7 +434,6 @@ vint64m8_t test_vsbc_vvm_i64m8(vint64m8_t op1, vint64m8_t op2,
   return vsbc_vvm_i64m8(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -488,7 +444,6 @@ vint64m8_t test_vsbc_vxm_i64m8(vint64m8_t op1, int64_t op2, vbool8_t borrowin,
   return vsbc_vxm_i64m8(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -499,7 +454,6 @@ vuint8mf8_t test_vsbc_vvm_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2,
   return vsbc_vvm_u8mf8(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -510,7 +464,6 @@ vuint8mf8_t test_vsbc_vxm_u8mf8(vuint8mf8_t op1, uint8_t op2,
   return vsbc_vxm_u8mf8(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -521,7 +474,6 @@ vuint8mf4_t test_vsbc_vvm_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2,
   return vsbc_vvm_u8mf4(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -532,7 +484,6 @@ vuint8mf4_t test_vsbc_vxm_u8mf4(vuint8mf4_t op1, uint8_t op2,
   return vsbc_vxm_u8mf4(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -543,7 +494,6 @@ vuint8mf2_t test_vsbc_vvm_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2,
   return vsbc_vvm_u8mf2(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -554,7 +504,6 @@ vuint8mf2_t test_vsbc_vxm_u8mf2(vuint8mf2_t op1, uint8_t op2,
   return vsbc_vxm_u8mf2(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -565,7 +514,6 @@ vuint8m1_t test_vsbc_vvm_u8m1(vuint8m1_t op1, vuint8m1_t op2, vbool8_t borrowin,
   return vsbc_vvm_u8m1(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -576,7 +524,6 @@ vuint8m1_t test_vsbc_vxm_u8m1(vuint8m1_t op1, uint8_t op2, vbool8_t borrowin,
   return vsbc_vxm_u8m1(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -587,7 +534,6 @@ vuint8m2_t test_vsbc_vvm_u8m2(vuint8m2_t op1, vuint8m2_t op2, vbool4_t borrowin,
   return vsbc_vvm_u8m2(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -598,7 +544,6 @@ vuint8m2_t test_vsbc_vxm_u8m2(vuint8m2_t op1, uint8_t op2, vbool4_t borrowin,
   return vsbc_vxm_u8m2(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -609,7 +554,6 @@ vuint8m4_t test_vsbc_vvm_u8m4(vuint8m4_t op1, vuint8m4_t op2, vbool2_t borrowin,
   return vsbc_vvm_u8m4(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -620,7 +564,6 @@ vuint8m4_t test_vsbc_vxm_u8m4(vuint8m4_t op1, uint8_t op2, vbool2_t borrowin,
   return vsbc_vxm_u8m4(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -631,7 +574,6 @@ vuint8m8_t test_vsbc_vvm_u8m8(vuint8m8_t op1, vuint8m8_t op2, vbool1_t borrowin,
   return vsbc_vvm_u8m8(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -642,7 +584,6 @@ vuint8m8_t test_vsbc_vxm_u8m8(vuint8m8_t op1, uint8_t op2, vbool1_t borrowin,
   return vsbc_vxm_u8m8(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -653,7 +594,6 @@ vuint16mf4_t test_vsbc_vvm_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
   return vsbc_vvm_u16mf4(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -664,7 +604,6 @@ vuint16mf4_t test_vsbc_vxm_u16mf4(vuint16mf4_t op1, uint16_t op2,
   return vsbc_vxm_u16mf4(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -675,7 +614,6 @@ vuint16mf2_t test_vsbc_vvm_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
   return vsbc_vvm_u16mf2(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -686,7 +624,6 @@ vuint16mf2_t test_vsbc_vxm_u16mf2(vuint16mf2_t op1, uint16_t op2,
   return vsbc_vxm_u16mf2(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -697,7 +634,6 @@ vuint16m1_t test_vsbc_vvm_u16m1(vuint16m1_t op1, vuint16m1_t op2,
   return vsbc_vvm_u16m1(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -708,7 +644,6 @@ vuint16m1_t test_vsbc_vxm_u16m1(vuint16m1_t op1, uint16_t op2,
   return vsbc_vxm_u16m1(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -719,7 +654,6 @@ vuint16m2_t test_vsbc_vvm_u16m2(vuint16m2_t op1, vuint16m2_t op2,
   return vsbc_vvm_u16m2(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -730,7 +664,6 @@ vuint16m2_t test_vsbc_vxm_u16m2(vuint16m2_t op1, uint16_t op2,
   return vsbc_vxm_u16m2(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -741,7 +674,6 @@ vuint16m4_t test_vsbc_vvm_u16m4(vuint16m4_t op1, vuint16m4_t op2,
   return vsbc_vvm_u16m4(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -752,7 +684,6 @@ vuint16m4_t test_vsbc_vxm_u16m4(vuint16m4_t op1, uint16_t op2,
   return vsbc_vxm_u16m4(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -763,7 +694,6 @@ vuint16m8_t test_vsbc_vvm_u16m8(vuint16m8_t op1, vuint16m8_t op2,
   return vsbc_vvm_u16m8(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -774,7 +704,6 @@ vuint16m8_t test_vsbc_vxm_u16m8(vuint16m8_t op1, uint16_t op2,
   return vsbc_vxm_u16m8(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -785,7 +714,6 @@ vuint32mf2_t test_vsbc_vvm_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
   return vsbc_vvm_u32mf2(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -796,7 +724,6 @@ vuint32mf2_t test_vsbc_vxm_u32mf2(vuint32mf2_t op1, uint32_t op2,
   return vsbc_vxm_u32mf2(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -807,7 +734,6 @@ vuint32m1_t test_vsbc_vvm_u32m1(vuint32m1_t op1, vuint32m1_t op2,
   return vsbc_vvm_u32m1(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -818,7 +744,6 @@ vuint32m1_t test_vsbc_vxm_u32m1(vuint32m1_t op1, uint32_t op2,
   return vsbc_vxm_u32m1(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -829,7 +754,6 @@ vuint32m2_t test_vsbc_vvm_u32m2(vuint32m2_t op1, vuint32m2_t op2,
   return vsbc_vvm_u32m2(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -840,7 +764,6 @@ vuint32m2_t test_vsbc_vxm_u32m2(vuint32m2_t op1, uint32_t op2,
   return vsbc_vxm_u32m2(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -851,7 +774,6 @@ vuint32m4_t test_vsbc_vvm_u32m4(vuint32m4_t op1, vuint32m4_t op2,
   return vsbc_vvm_u32m4(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -862,7 +784,6 @@ vuint32m4_t test_vsbc_vxm_u32m4(vuint32m4_t op1, uint32_t op2,
   return vsbc_vxm_u32m4(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -873,7 +794,6 @@ vuint32m8_t test_vsbc_vvm_u32m8(vuint32m8_t op1, vuint32m8_t op2,
   return vsbc_vvm_u32m8(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -884,7 +804,6 @@ vuint32m8_t test_vsbc_vxm_u32m8(vuint32m8_t op1, uint32_t op2,
   return vsbc_vxm_u32m8(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -895,7 +814,6 @@ vuint64m1_t test_vsbc_vvm_u64m1(vuint64m1_t op1, vuint64m1_t op2,
   return vsbc_vvm_u64m1(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -906,7 +824,6 @@ vuint64m1_t test_vsbc_vxm_u64m1(vuint64m1_t op1, uint64_t op2,
   return vsbc_vxm_u64m1(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -917,7 +834,6 @@ vuint64m2_t test_vsbc_vvm_u64m2(vuint64m2_t op1, vuint64m2_t op2,
   return vsbc_vvm_u64m2(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -928,7 +844,6 @@ vuint64m2_t test_vsbc_vxm_u64m2(vuint64m2_t op1, uint64_t op2,
   return vsbc_vxm_u64m2(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -939,7 +854,6 @@ vuint64m4_t test_vsbc_vvm_u64m4(vuint64m4_t op1, vuint64m4_t op2,
   return vsbc_vvm_u64m4(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -950,7 +864,6 @@ vuint64m4_t test_vsbc_vxm_u64m4(vuint64m4_t op1, uint64_t op2,
   return vsbc_vxm_u64m4(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -961,7 +874,6 @@ vuint64m8_t test_vsbc_vvm_u64m8(vuint64m8_t op1, vuint64m8_t op2,
   return vsbc_vvm_u64m8(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsetvl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsetvl.c
index 24be494131d28..30ef1987507a0 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsetvl.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsetvl.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vsetvl_e8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[AVL_ADDR:%.*]] = alloca i64, align 8
@@ -18,7 +17,6 @@ size_t test_vsetvl_e8m1(size_t avl) {
   return vsetvl_e8m1(avl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsetvl_e8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[AVL_ADDR:%.*]] = alloca i64, align 8
@@ -31,7 +29,6 @@ size_t test_vsetvl_e8m2(size_t avl) {
   return vsetvl_e8m2(avl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsetvl_e8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[AVL_ADDR:%.*]] = alloca i64, align 8
@@ -44,7 +41,6 @@ size_t test_vsetvl_e8m4(size_t avl) {
   return vsetvl_e8m4(avl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsetvl_e8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[AVL_ADDR:%.*]] = alloca i64, align 8
@@ -57,7 +53,6 @@ size_t test_vsetvl_e8m8(size_t avl) {
   return vsetvl_e8m8(avl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsetvl_e8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[AVL_ADDR:%.*]] = alloca i64, align 8
@@ -70,7 +65,6 @@ size_t test_vsetvl_e8mf2(size_t avl) {
   return vsetvl_e8mf2(avl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsetvl_e8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[AVL_ADDR:%.*]] = alloca i64, align 8
@@ -83,7 +77,6 @@ size_t test_vsetvl_e8mf4(size_t avl) {
   return vsetvl_e8mf4(avl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsetvl_e8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[AVL_ADDR:%.*]] = alloca i64, align 8
@@ -96,7 +89,6 @@ size_t test_vsetvl_e8mf8(size_t avl) {
   return vsetvl_e8mf8(avl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsetvl_e16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[AVL_ADDR:%.*]] = alloca i64, align 8
@@ -109,7 +101,6 @@ size_t test_vsetvl_e16m1(size_t avl) {
   return vsetvl_e16m1(avl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsetvl_e16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[AVL_ADDR:%.*]] = alloca i64, align 8
@@ -122,7 +113,6 @@ size_t test_vsetvl_e16m2(size_t avl) {
   return vsetvl_e16m2(avl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsetvl_e16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[AVL_ADDR:%.*]] = alloca i64, align 8
@@ -135,7 +125,6 @@ size_t test_vsetvl_e16m4(size_t avl) {
   return vsetvl_e16m4(avl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsetvl_e16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[AVL_ADDR:%.*]] = alloca i64, align 8
@@ -148,7 +137,6 @@ size_t test_vsetvl_e16m8(size_t avl) {
   return vsetvl_e16m8(avl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsetvl_e16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[AVL_ADDR:%.*]] = alloca i64, align 8
@@ -161,7 +149,6 @@ size_t test_vsetvl_e16mf2(size_t avl) {
   return vsetvl_e16mf2(avl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsetvl_e16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[AVL_ADDR:%.*]] = alloca i64, align 8
@@ -174,7 +161,6 @@ size_t test_vsetvl_e16mf4(size_t avl) {
   return vsetvl_e16mf4(avl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsetvl_e32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[AVL_ADDR:%.*]] = alloca i64, align 8
@@ -187,7 +173,6 @@ size_t test_vsetvl_e32m1(size_t avl) {
   return vsetvl_e32m1(avl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsetvl_e32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[AVL_ADDR:%.*]] = alloca i64, align 8
@@ -200,7 +185,6 @@ size_t test_vsetvl_e32m2(size_t avl) {
   return vsetvl_e32m2(avl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsetvl_e32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[AVL_ADDR:%.*]] = alloca i64, align 8
@@ -213,7 +197,6 @@ size_t test_vsetvl_e32m4(size_t avl) {
   return vsetvl_e32m4(avl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsetvl_e32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[AVL_ADDR:%.*]] = alloca i64, align 8
@@ -226,7 +209,6 @@ size_t test_vsetvl_e32m8(size_t avl) {
   return vsetvl_e32m8(avl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsetvl_e32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[AVL_ADDR:%.*]] = alloca i64, align 8
@@ -239,7 +221,6 @@ size_t test_vsetvl_e32mf2(size_t avl) {
   return vsetvl_e32mf2(avl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsetvl_e64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[AVL_ADDR:%.*]] = alloca i64, align 8
@@ -252,7 +233,6 @@ size_t test_vsetvl_e64m1(size_t avl) {
   return vsetvl_e64m1(avl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsetvl_e64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[AVL_ADDR:%.*]] = alloca i64, align 8
@@ -265,7 +245,6 @@ size_t test_vsetvl_e64m2(size_t avl) {
   return vsetvl_e64m2(avl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsetvl_e64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[AVL_ADDR:%.*]] = alloca i64, align 8
@@ -278,7 +257,6 @@ size_t test_vsetvl_e64m4(size_t avl) {
   return vsetvl_e64m4(avl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsetvl_e64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[AVL_ADDR:%.*]] = alloca i64, align 8

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsetvlmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsetvlmax.c
index 9997de6dfa2e0..0aef38ccfa018 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsetvlmax.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsetvlmax.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vsetvlmax_e8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 0, i64 0)
@@ -15,7 +14,6 @@ size_t test_vsetvlmax_e8m1() {
   return vsetvlmax_e8m1();
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsetvlmax_e8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 0, i64 1)
@@ -25,7 +23,6 @@ size_t test_vsetvlmax_e8m2() {
   return vsetvlmax_e8m2();
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsetvlmax_e8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 0, i64 2)
@@ -35,7 +32,6 @@ size_t test_vsetvlmax_e8m4() {
   return vsetvlmax_e8m4();
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsetvlmax_e8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 0, i64 3)
@@ -45,7 +41,6 @@ size_t test_vsetvlmax_e8m8() {
   return vsetvlmax_e8m8();
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsetvlmax_e8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 0, i64 7)
@@ -55,7 +50,6 @@ size_t test_vsetvlmax_e8mf2() {
   return vsetvlmax_e8mf2();
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsetvlmax_e8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 0, i64 6)
@@ -65,7 +59,6 @@ size_t test_vsetvlmax_e8mf4() {
   return vsetvlmax_e8mf4();
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsetvlmax_e8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 0, i64 5)
@@ -75,7 +68,6 @@ size_t test_vsetvlmax_e8mf8() {
   return vsetvlmax_e8mf8();
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsetvlmax_e16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 1, i64 0)
@@ -85,7 +77,6 @@ size_t test_vsetvlmax_e16m1() {
   return vsetvlmax_e16m1();
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsetvlmax_e16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 1, i64 1)
@@ -95,7 +86,6 @@ size_t test_vsetvlmax_e16m2() {
   return vsetvlmax_e16m2();
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsetvlmax_e16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 1, i64 2)
@@ -105,7 +95,6 @@ size_t test_vsetvlmax_e16m4() {
   return vsetvlmax_e16m4();
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsetvlmax_e16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 1, i64 3)
@@ -115,7 +104,6 @@ size_t test_vsetvlmax_e16m8() {
   return vsetvlmax_e16m8();
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsetvlmax_e16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 1, i64 7)
@@ -125,7 +113,6 @@ size_t test_vsetvlmax_e16mf2() {
   return vsetvlmax_e16mf2();
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsetvlmax_e16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 1, i64 6)
@@ -135,7 +122,6 @@ size_t test_vsetvlmax_e16mf4() {
   return vsetvlmax_e16mf4();
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsetvlmax_e32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 2, i64 0)
@@ -145,7 +131,6 @@ size_t test_vsetvlmax_e32m1() {
   return vsetvlmax_e32m1();
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsetvlmax_e32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 2, i64 1)
@@ -155,7 +140,6 @@ size_t test_vsetvlmax_e32m2() {
   return vsetvlmax_e32m2();
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsetvlmax_e32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 2, i64 2)
@@ -165,7 +149,6 @@ size_t test_vsetvlmax_e32m4() {
   return vsetvlmax_e32m4();
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsetvlmax_e32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 2, i64 3)
@@ -175,7 +158,6 @@ size_t test_vsetvlmax_e32m8() {
   return vsetvlmax_e32m8();
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsetvlmax_e32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 2, i64 7)
@@ -185,7 +167,6 @@ size_t test_vsetvlmax_e32mf2() {
   return vsetvlmax_e32mf2();
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsetvlmax_e64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 3, i64 0)
@@ -195,7 +176,6 @@ size_t test_vsetvlmax_e64m1() {
   return vsetvlmax_e64m1();
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsetvlmax_e64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 3, i64 1)
@@ -205,7 +185,6 @@ size_t test_vsetvlmax_e64m2() {
   return vsetvlmax_e64m2();
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsetvlmax_e64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 3, i64 2)
@@ -215,7 +194,6 @@ size_t test_vsetvlmax_e64m4() {
   return vsetvlmax_e64m4();
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsetvlmax_e64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 3, i64 3)

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsext.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsext.c
index 025218a8a3819..da2696698d633 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsext.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsext.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf2_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsext.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint16mf4_t test_vsext_vf2_i16mf4(vint8mf8_t op1, size_t vl) {
   return vsext_vf2_i16mf4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf2_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsext.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint16mf2_t test_vsext_vf2_i16mf2(vint8mf4_t op1, size_t vl) {
   return vsext_vf2_i16mf2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf2_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsext.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint16m1_t test_vsext_vf2_i16m1(vint8mf2_t op1, size_t vl) {
   return vsext_vf2_i16m1(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf2_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsext.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint16m2_t test_vsext_vf2_i16m2(vint8m1_t op1, size_t vl) {
   return vsext_vf2_i16m2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf2_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsext.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint16m4_t test_vsext_vf2_i16m4(vint8m2_t op1, size_t vl) {
   return vsext_vf2_i16m4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf2_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsext.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint16m8_t test_vsext_vf2_i16m8(vint8m4_t op1, size_t vl) {
   return vsext_vf2_i16m8(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vint32mf2_t test_vsext_vf4_i32mf2(vint8mf8_t op1, size_t vl) {
   return vsext_vf4_i32mf2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf4_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vint32m1_t test_vsext_vf4_i32m1(vint8mf4_t op1, size_t vl) {
   return vsext_vf4_i32m1(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf4_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vint32m2_t test_vsext_vf4_i32m2(vint8mf2_t op1, size_t vl) {
   return vsext_vf4_i32m2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf4_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vint32m4_t test_vsext_vf4_i32m4(vint8m1_t op1, size_t vl) {
   return vsext_vf4_i32m4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf4_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vint32m8_t test_vsext_vf4_i32m8(vint8m2_t op1, size_t vl) {
   return vsext_vf4_i32m8(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf8_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vint64m1_t test_vsext_vf8_i64m1(vint8mf8_t op1, size_t vl) {
   return vsext_vf8_i64m1(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf8_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vint64m2_t test_vsext_vf8_i64m2(vint8mf4_t op1, size_t vl) {
   return vsext_vf8_i64m2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf8_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vint64m4_t test_vsext_vf8_i64m4(vint8mf2_t op1, size_t vl) {
   return vsext_vf8_i64m4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf8_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vint64m8_t test_vsext_vf8_i64m8(vint8m1_t op1, size_t vl) {
   return vsext_vf8_i64m8(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf2_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vint32mf2_t test_vsext_vf2_i32mf2(vint16mf4_t op1, size_t vl) {
   return vsext_vf2_i32mf2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf2_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vint32m1_t test_vsext_vf2_i32m1(vint16mf2_t op1, size_t vl) {
   return vsext_vf2_i32m1(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf2_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vint32m2_t test_vsext_vf2_i32m2(vint16m1_t op1, size_t vl) {
   return vsext_vf2_i32m2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf2_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vint32m4_t test_vsext_vf2_i32m4(vint16m2_t op1, size_t vl) {
   return vsext_vf2_i32m4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf2_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vint32m8_t test_vsext_vf2_i32m8(vint16m4_t op1, size_t vl) {
   return vsext_vf2_i32m8(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf4_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vint64m1_t test_vsext_vf4_i64m1(vint16mf4_t op1, size_t vl) {
   return vsext_vf4_i64m1(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf4_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vint64m2_t test_vsext_vf4_i64m2(vint16mf2_t op1, size_t vl) {
   return vsext_vf4_i64m2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf4_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vint64m4_t test_vsext_vf4_i64m4(vint16m1_t op1, size_t vl) {
   return vsext_vf4_i64m4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf4_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vint64m8_t test_vsext_vf4_i64m8(vint16m2_t op1, size_t vl) {
   return vsext_vf4_i64m8(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf2_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vint64m1_t test_vsext_vf2_i64m1(vint32mf2_t op1, size_t vl) {
   return vsext_vf2_i64m1(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf2_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vint64m2_t test_vsext_vf2_i64m2(vint32m1_t op1, size_t vl) {
   return vsext_vf2_i64m2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf2_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vint64m4_t test_vsext_vf2_i64m4(vint32m2_t op1, size_t vl) {
   return vsext_vf2_i64m4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf2_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vint64m8_t test_vsext_vf2_i64m8(vint32m4_t op1, size_t vl) {
   return vsext_vf2_i64m8(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf2_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsext.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -295,7 +266,6 @@ vint16mf4_t test_vsext_vf2_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
   return vsext_vf2_i16mf4_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf2_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsext.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -306,7 +276,6 @@ vint16mf2_t test_vsext_vf2_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
   return vsext_vf2_i16mf2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf2_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsext.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -317,7 +286,6 @@ vint16m1_t test_vsext_vf2_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
   return vsext_vf2_i16m1_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf2_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsext.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -328,7 +296,6 @@ vint16m2_t test_vsext_vf2_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
   return vsext_vf2_i16m2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf2_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsext.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -339,7 +306,6 @@ vint16m4_t test_vsext_vf2_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
   return vsext_vf2_i16m4_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf2_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsext.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -350,7 +316,6 @@ vint16m8_t test_vsext_vf2_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
   return vsext_vf2_i16m8_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -361,7 +326,6 @@ vint32mf2_t test_vsext_vf4_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
   return vsext_vf4_i32mf2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf4_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -372,7 +336,6 @@ vint32m1_t test_vsext_vf4_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
   return vsext_vf4_i32m1_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf4_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -383,7 +346,6 @@ vint32m2_t test_vsext_vf4_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
   return vsext_vf4_i32m2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf4_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -394,7 +356,6 @@ vint32m4_t test_vsext_vf4_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
   return vsext_vf4_i32m4_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf4_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -405,7 +366,6 @@ vint32m8_t test_vsext_vf4_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
   return vsext_vf4_i32m8_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf8_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -416,7 +376,6 @@ vint64m1_t test_vsext_vf8_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
   return vsext_vf8_i64m1_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf8_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -427,7 +386,6 @@ vint64m2_t test_vsext_vf8_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
   return vsext_vf8_i64m2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf8_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -438,7 +396,6 @@ vint64m4_t test_vsext_vf8_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
   return vsext_vf8_i64m4_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf8_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -449,7 +406,6 @@ vint64m8_t test_vsext_vf8_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
   return vsext_vf8_i64m8_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf2_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -460,7 +416,6 @@ vint32mf2_t test_vsext_vf2_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
   return vsext_vf2_i32mf2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf2_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -471,7 +426,6 @@ vint32m1_t test_vsext_vf2_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
   return vsext_vf2_i32m1_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf2_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -482,7 +436,6 @@ vint32m2_t test_vsext_vf2_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
   return vsext_vf2_i32m2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf2_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -493,7 +446,6 @@ vint32m4_t test_vsext_vf2_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
   return vsext_vf2_i32m4_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf2_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -504,7 +456,6 @@ vint32m8_t test_vsext_vf2_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
   return vsext_vf2_i32m8_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf4_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -515,7 +466,6 @@ vint64m1_t test_vsext_vf4_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
   return vsext_vf4_i64m1_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf4_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -526,7 +476,6 @@ vint64m2_t test_vsext_vf4_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
   return vsext_vf4_i64m2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf4_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -537,7 +486,6 @@ vint64m4_t test_vsext_vf4_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
   return vsext_vf4_i64m4_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf4_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -548,7 +496,6 @@ vint64m8_t test_vsext_vf4_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
   return vsext_vf4_i64m8_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf2_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -559,7 +506,6 @@ vint64m1_t test_vsext_vf2_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
   return vsext_vf2_i64m1_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf2_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -570,7 +516,6 @@ vint64m2_t test_vsext_vf2_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
   return vsext_vf2_i64m2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf2_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -581,7 +526,6 @@ vint64m4_t test_vsext_vf2_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
   return vsext_vf2_i64m4_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf2_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1down.c
index 87c34b8a3f026..8171926228f7c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1down.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1down.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1down.nxv1i8.i8.i64(<vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint8mf8_t test_vslide1down_vx_i8mf8(vint8mf8_t src, int8_t value, size_t vl) {
   return vslide1down_vx_i8mf8(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1down.nxv2i8.i8.i64(<vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint8mf4_t test_vslide1down_vx_i8mf4(vint8mf4_t src, int8_t value, size_t vl) {
   return vslide1down_vx_i8mf4(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1down.nxv4i8.i8.i64(<vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint8mf2_t test_vslide1down_vx_i8mf2(vint8mf2_t src, int8_t value, size_t vl) {
   return vslide1down_vx_i8mf2(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1down.nxv8i8.i8.i64(<vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint8m1_t test_vslide1down_vx_i8m1(vint8m1_t src, int8_t value, size_t vl) {
   return vslide1down_vx_i8m1(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1down.nxv16i8.i8.i64(<vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint8m2_t test_vslide1down_vx_i8m2(vint8m2_t src, int8_t value, size_t vl) {
   return vslide1down_vx_i8m2(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1down.nxv32i8.i8.i64(<vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint8m4_t test_vslide1down_vx_i8m4(vint8m4_t src, int8_t value, size_t vl) {
   return vslide1down_vx_i8m4(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1down.nxv64i8.i8.i64(<vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vint8m8_t test_vslide1down_vx_i8m8(vint8m8_t src, int8_t value, size_t vl) {
   return vslide1down_vx_i8m8(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1down.nxv1i16.i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -85,7 +77,6 @@ vint16mf4_t test_vslide1down_vx_i16mf4(vint16mf4_t src, int16_t value,
   return vslide1down_vx_i16mf4(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1down.nxv2i16.i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -96,7 +87,6 @@ vint16mf2_t test_vslide1down_vx_i16mf2(vint16mf2_t src, int16_t value,
   return vslide1down_vx_i16mf2(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1down.nxv4i16.i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -106,7 +96,6 @@ vint16m1_t test_vslide1down_vx_i16m1(vint16m1_t src, int16_t value, size_t vl) {
   return vslide1down_vx_i16m1(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1down.nxv8i16.i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -116,7 +105,6 @@ vint16m2_t test_vslide1down_vx_i16m2(vint16m2_t src, int16_t value, size_t vl) {
   return vslide1down_vx_i16m2(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1down.nxv16i16.i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -126,7 +114,6 @@ vint16m4_t test_vslide1down_vx_i16m4(vint16m4_t src, int16_t value, size_t vl) {
   return vslide1down_vx_i16m4(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1down.nxv32i16.i16.i64(<vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -136,7 +123,6 @@ vint16m8_t test_vslide1down_vx_i16m8(vint16m8_t src, int16_t value, size_t vl) {
   return vslide1down_vx_i16m8(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1down.nxv1i32.i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -147,7 +133,6 @@ vint32mf2_t test_vslide1down_vx_i32mf2(vint32mf2_t src, int32_t value,
   return vslide1down_vx_i32mf2(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1down.nxv2i32.i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -157,7 +142,6 @@ vint32m1_t test_vslide1down_vx_i32m1(vint32m1_t src, int32_t value, size_t vl) {
   return vslide1down_vx_i32m1(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1down.nxv4i32.i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -167,7 +151,6 @@ vint32m2_t test_vslide1down_vx_i32m2(vint32m2_t src, int32_t value, size_t vl) {
   return vslide1down_vx_i32m2(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1down.nxv8i32.i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -177,7 +160,6 @@ vint32m4_t test_vslide1down_vx_i32m4(vint32m4_t src, int32_t value, size_t vl) {
   return vslide1down_vx_i32m4(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1down.nxv16i32.i32.i64(<vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -187,7 +169,6 @@ vint32m8_t test_vslide1down_vx_i32m8(vint32m8_t src, int32_t value, size_t vl) {
   return vslide1down_vx_i32m8(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -197,7 +178,6 @@ vint64m1_t test_vslide1down_vx_i64m1(vint64m1_t src, int64_t value, size_t vl) {
   return vslide1down_vx_i64m1(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1down.nxv2i64.i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -207,7 +187,6 @@ vint64m2_t test_vslide1down_vx_i64m2(vint64m2_t src, int64_t value, size_t vl) {
   return vslide1down_vx_i64m2(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1down.nxv4i64.i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -217,7 +196,6 @@ vint64m4_t test_vslide1down_vx_i64m4(vint64m4_t src, int64_t value, size_t vl) {
   return vslide1down_vx_i64m4(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1down.nxv8i64.i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -227,7 +205,6 @@ vint64m8_t test_vslide1down_vx_i64m8(vint64m8_t src, int64_t value, size_t vl) {
   return vslide1down_vx_i64m8(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1down.nxv1i8.i8.i64(<vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -238,7 +215,6 @@ vuint8mf8_t test_vslide1down_vx_u8mf8(vuint8mf8_t src, uint8_t value,
   return vslide1down_vx_u8mf8(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1down.nxv2i8.i8.i64(<vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -249,7 +225,6 @@ vuint8mf4_t test_vslide1down_vx_u8mf4(vuint8mf4_t src, uint8_t value,
   return vslide1down_vx_u8mf4(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1down.nxv4i8.i8.i64(<vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -260,7 +235,6 @@ vuint8mf2_t test_vslide1down_vx_u8mf2(vuint8mf2_t src, uint8_t value,
   return vslide1down_vx_u8mf2(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1down.nxv8i8.i8.i64(<vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -270,7 +244,6 @@ vuint8m1_t test_vslide1down_vx_u8m1(vuint8m1_t src, uint8_t value, size_t vl) {
   return vslide1down_vx_u8m1(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1down.nxv16i8.i8.i64(<vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -280,7 +253,6 @@ vuint8m2_t test_vslide1down_vx_u8m2(vuint8m2_t src, uint8_t value, size_t vl) {
   return vslide1down_vx_u8m2(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1down.nxv32i8.i8.i64(<vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -290,7 +262,6 @@ vuint8m4_t test_vslide1down_vx_u8m4(vuint8m4_t src, uint8_t value, size_t vl) {
   return vslide1down_vx_u8m4(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1down.nxv64i8.i8.i64(<vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -300,7 +271,6 @@ vuint8m8_t test_vslide1down_vx_u8m8(vuint8m8_t src, uint8_t value, size_t vl) {
   return vslide1down_vx_u8m8(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1down.nxv1i16.i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -311,7 +281,6 @@ vuint16mf4_t test_vslide1down_vx_u16mf4(vuint16mf4_t src, uint16_t value,
   return vslide1down_vx_u16mf4(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1down.nxv2i16.i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -322,7 +291,6 @@ vuint16mf2_t test_vslide1down_vx_u16mf2(vuint16mf2_t src, uint16_t value,
   return vslide1down_vx_u16mf2(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1down.nxv4i16.i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -333,7 +301,6 @@ vuint16m1_t test_vslide1down_vx_u16m1(vuint16m1_t src, uint16_t value,
   return vslide1down_vx_u16m1(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1down.nxv8i16.i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -344,7 +311,6 @@ vuint16m2_t test_vslide1down_vx_u16m2(vuint16m2_t src, uint16_t value,
   return vslide1down_vx_u16m2(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1down.nxv16i16.i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -355,7 +321,6 @@ vuint16m4_t test_vslide1down_vx_u16m4(vuint16m4_t src, uint16_t value,
   return vslide1down_vx_u16m4(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1down.nxv32i16.i16.i64(<vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -366,7 +331,6 @@ vuint16m8_t test_vslide1down_vx_u16m8(vuint16m8_t src, uint16_t value,
   return vslide1down_vx_u16m8(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1down.nxv1i32.i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -377,7 +341,6 @@ vuint32mf2_t test_vslide1down_vx_u32mf2(vuint32mf2_t src, uint32_t value,
   return vslide1down_vx_u32mf2(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1down.nxv2i32.i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -388,7 +351,6 @@ vuint32m1_t test_vslide1down_vx_u32m1(vuint32m1_t src, uint32_t value,
   return vslide1down_vx_u32m1(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1down.nxv4i32.i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -399,7 +361,6 @@ vuint32m2_t test_vslide1down_vx_u32m2(vuint32m2_t src, uint32_t value,
   return vslide1down_vx_u32m2(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1down.nxv8i32.i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -410,7 +371,6 @@ vuint32m4_t test_vslide1down_vx_u32m4(vuint32m4_t src, uint32_t value,
   return vslide1down_vx_u32m4(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1down.nxv16i32.i32.i64(<vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -421,7 +381,6 @@ vuint32m8_t test_vslide1down_vx_u32m8(vuint32m8_t src, uint32_t value,
   return vslide1down_vx_u32m8(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -432,7 +391,6 @@ vuint64m1_t test_vslide1down_vx_u64m1(vuint64m1_t src, uint64_t value,
   return vslide1down_vx_u64m1(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1down.nxv2i64.i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -443,7 +401,6 @@ vuint64m2_t test_vslide1down_vx_u64m2(vuint64m2_t src, uint64_t value,
   return vslide1down_vx_u64m2(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1down.nxv4i64.i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -454,7 +411,6 @@ vuint64m4_t test_vslide1down_vx_u64m4(vuint64m4_t src, uint64_t value,
   return vslide1down_vx_u64m4(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1down.nxv8i64.i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -465,7 +421,6 @@ vuint64m8_t test_vslide1down_vx_u64m8(vuint64m8_t src, uint64_t value,
   return vslide1down_vx_u64m8(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1down.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -477,7 +432,6 @@ vint8mf8_t test_vslide1down_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
   return vslide1down_vx_i8mf8_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1down.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -489,7 +443,6 @@ vint8mf4_t test_vslide1down_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
   return vslide1down_vx_i8mf4_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1down.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -501,7 +454,6 @@ vint8mf2_t test_vslide1down_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
   return vslide1down_vx_i8mf2_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1down.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -512,7 +464,6 @@ vint8m1_t test_vslide1down_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
   return vslide1down_vx_i8m1_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1down.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -523,7 +474,6 @@ vint8m2_t test_vslide1down_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
   return vslide1down_vx_i8m2_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1down.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -534,7 +484,6 @@ vint8m4_t test_vslide1down_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
   return vslide1down_vx_i8m4_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1down.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -545,7 +494,6 @@ vint8m8_t test_vslide1down_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
   return vslide1down_vx_i8m8_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1down.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -557,7 +505,6 @@ vint16mf4_t test_vslide1down_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
   return vslide1down_vx_i16mf4_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1down.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -569,7 +516,6 @@ vint16mf2_t test_vslide1down_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
   return vslide1down_vx_i16mf2_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1down.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -581,7 +527,6 @@ vint16m1_t test_vslide1down_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
   return vslide1down_vx_i16m1_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1down.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -593,7 +538,6 @@ vint16m2_t test_vslide1down_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
   return vslide1down_vx_i16m2_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1down.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -605,7 +549,6 @@ vint16m4_t test_vslide1down_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
   return vslide1down_vx_i16m4_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1down.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -617,7 +560,6 @@ vint16m8_t test_vslide1down_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
   return vslide1down_vx_i16m8_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1down.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -629,7 +571,6 @@ vint32mf2_t test_vslide1down_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
   return vslide1down_vx_i32mf2_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1down.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -641,7 +582,6 @@ vint32m1_t test_vslide1down_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
   return vslide1down_vx_i32m1_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1down.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -653,7 +593,6 @@ vint32m2_t test_vslide1down_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
   return vslide1down_vx_i32m2_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1down.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -665,7 +604,6 @@ vint32m4_t test_vslide1down_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
   return vslide1down_vx_i32m4_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1down.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -677,7 +615,6 @@ vint32m8_t test_vslide1down_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
   return vslide1down_vx_i32m8_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1down.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -689,7 +626,6 @@ vint64m1_t test_vslide1down_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
   return vslide1down_vx_i64m1_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1down.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -701,7 +637,6 @@ vint64m2_t test_vslide1down_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
   return vslide1down_vx_i64m2_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1down.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -713,7 +648,6 @@ vint64m4_t test_vslide1down_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
   return vslide1down_vx_i64m4_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1down.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -725,7 +659,6 @@ vint64m8_t test_vslide1down_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
   return vslide1down_vx_i64m8_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1down.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -737,7 +670,6 @@ vuint8mf8_t test_vslide1down_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
   return vslide1down_vx_u8mf8_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1down.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -749,7 +681,6 @@ vuint8mf4_t test_vslide1down_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
   return vslide1down_vx_u8mf4_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1down.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -761,7 +692,6 @@ vuint8mf2_t test_vslide1down_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
   return vslide1down_vx_u8mf2_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1down.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -773,7 +703,6 @@ vuint8m1_t test_vslide1down_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
   return vslide1down_vx_u8m1_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1down.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -785,7 +714,6 @@ vuint8m2_t test_vslide1down_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
   return vslide1down_vx_u8m2_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1down.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -797,7 +725,6 @@ vuint8m4_t test_vslide1down_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
   return vslide1down_vx_u8m4_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1down.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -809,7 +736,6 @@ vuint8m8_t test_vslide1down_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
   return vslide1down_vx_u8m8_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1down.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -822,7 +748,6 @@ vuint16mf4_t test_vslide1down_vx_u16mf4_m(vbool64_t mask,
   return vslide1down_vx_u16mf4_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1down.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -835,7 +760,6 @@ vuint16mf2_t test_vslide1down_vx_u16mf2_m(vbool32_t mask,
   return vslide1down_vx_u16mf2_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1down.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -847,7 +771,6 @@ vuint16m1_t test_vslide1down_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
   return vslide1down_vx_u16m1_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1down.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -859,7 +782,6 @@ vuint16m2_t test_vslide1down_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
   return vslide1down_vx_u16m2_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1down.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -871,7 +793,6 @@ vuint16m4_t test_vslide1down_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
   return vslide1down_vx_u16m4_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1down.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -883,7 +804,6 @@ vuint16m8_t test_vslide1down_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
   return vslide1down_vx_u16m8_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1down.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -896,7 +816,6 @@ vuint32mf2_t test_vslide1down_vx_u32mf2_m(vbool64_t mask,
   return vslide1down_vx_u32mf2_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1down.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -908,7 +827,6 @@ vuint32m1_t test_vslide1down_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
   return vslide1down_vx_u32m1_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1down.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -920,7 +838,6 @@ vuint32m2_t test_vslide1down_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
   return vslide1down_vx_u32m2_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1down.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -932,7 +849,6 @@ vuint32m4_t test_vslide1down_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
   return vslide1down_vx_u32m4_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1down.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -944,7 +860,6 @@ vuint32m8_t test_vslide1down_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
   return vslide1down_vx_u32m8_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1down.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -956,7 +871,6 @@ vuint64m1_t test_vslide1down_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
   return vslide1down_vx_u64m1_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1down.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -968,7 +882,6 @@ vuint64m2_t test_vslide1down_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
   return vslide1down_vx_u64m2_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1down.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -980,7 +893,6 @@ vuint64m4_t test_vslide1down_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
   return vslide1down_vx_u64m4_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1down.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1up.c
index 4b9fecd7ed6ef..0c154d570ab76 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1up.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1up.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1up.nxv1i8.i8.i64(<vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint8mf8_t test_vslide1up_vx_i8mf8(vint8mf8_t src, int8_t value, size_t vl) {
   return vslide1up_vx_i8mf8(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1up.nxv2i8.i8.i64(<vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint8mf4_t test_vslide1up_vx_i8mf4(vint8mf4_t src, int8_t value, size_t vl) {
   return vslide1up_vx_i8mf4(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1up.nxv4i8.i8.i64(<vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint8mf2_t test_vslide1up_vx_i8mf2(vint8mf2_t src, int8_t value, size_t vl) {
   return vslide1up_vx_i8mf2(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1up.nxv8i8.i8.i64(<vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint8m1_t test_vslide1up_vx_i8m1(vint8m1_t src, int8_t value, size_t vl) {
   return vslide1up_vx_i8m1(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1up.nxv16i8.i8.i64(<vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint8m2_t test_vslide1up_vx_i8m2(vint8m2_t src, int8_t value, size_t vl) {
   return vslide1up_vx_i8m2(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1up.nxv32i8.i8.i64(<vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint8m4_t test_vslide1up_vx_i8m4(vint8m4_t src, int8_t value, size_t vl) {
   return vslide1up_vx_i8m4(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1up.nxv64i8.i8.i64(<vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vint8m8_t test_vslide1up_vx_i8m8(vint8m8_t src, int8_t value, size_t vl) {
   return vslide1up_vx_i8m8(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1up.nxv1i16.i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -85,7 +77,6 @@ vint16mf4_t test_vslide1up_vx_i16mf4(vint16mf4_t src, int16_t value,
   return vslide1up_vx_i16mf4(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1up.nxv2i16.i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -96,7 +87,6 @@ vint16mf2_t test_vslide1up_vx_i16mf2(vint16mf2_t src, int16_t value,
   return vslide1up_vx_i16mf2(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1up.nxv4i16.i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -106,7 +96,6 @@ vint16m1_t test_vslide1up_vx_i16m1(vint16m1_t src, int16_t value, size_t vl) {
   return vslide1up_vx_i16m1(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1up.nxv8i16.i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -116,7 +105,6 @@ vint16m2_t test_vslide1up_vx_i16m2(vint16m2_t src, int16_t value, size_t vl) {
   return vslide1up_vx_i16m2(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1up.nxv16i16.i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -126,7 +114,6 @@ vint16m4_t test_vslide1up_vx_i16m4(vint16m4_t src, int16_t value, size_t vl) {
   return vslide1up_vx_i16m4(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1up.nxv32i16.i16.i64(<vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -136,7 +123,6 @@ vint16m8_t test_vslide1up_vx_i16m8(vint16m8_t src, int16_t value, size_t vl) {
   return vslide1up_vx_i16m8(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1up.nxv1i32.i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -147,7 +133,6 @@ vint32mf2_t test_vslide1up_vx_i32mf2(vint32mf2_t src, int32_t value,
   return vslide1up_vx_i32mf2(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1up.nxv2i32.i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -157,7 +142,6 @@ vint32m1_t test_vslide1up_vx_i32m1(vint32m1_t src, int32_t value, size_t vl) {
   return vslide1up_vx_i32m1(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1up.nxv4i32.i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -167,7 +151,6 @@ vint32m2_t test_vslide1up_vx_i32m2(vint32m2_t src, int32_t value, size_t vl) {
   return vslide1up_vx_i32m2(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1up.nxv8i32.i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -177,7 +160,6 @@ vint32m4_t test_vslide1up_vx_i32m4(vint32m4_t src, int32_t value, size_t vl) {
   return vslide1up_vx_i32m4(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1up.nxv16i32.i32.i64(<vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -187,7 +169,6 @@ vint32m8_t test_vslide1up_vx_i32m8(vint32m8_t src, int32_t value, size_t vl) {
   return vslide1up_vx_i32m8(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -197,7 +178,6 @@ vint64m1_t test_vslide1up_vx_i64m1(vint64m1_t src, int64_t value, size_t vl) {
   return vslide1up_vx_i64m1(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1up.nxv2i64.i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -207,7 +187,6 @@ vint64m2_t test_vslide1up_vx_i64m2(vint64m2_t src, int64_t value, size_t vl) {
   return vslide1up_vx_i64m2(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1up.nxv4i64.i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -217,7 +196,6 @@ vint64m4_t test_vslide1up_vx_i64m4(vint64m4_t src, int64_t value, size_t vl) {
   return vslide1up_vx_i64m4(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1up.nxv8i64.i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -227,7 +205,6 @@ vint64m8_t test_vslide1up_vx_i64m8(vint64m8_t src, int64_t value, size_t vl) {
   return vslide1up_vx_i64m8(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1up.nxv1i8.i8.i64(<vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -237,7 +214,6 @@ vuint8mf8_t test_vslide1up_vx_u8mf8(vuint8mf8_t src, uint8_t value, size_t vl) {
   return vslide1up_vx_u8mf8(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1up.nxv2i8.i8.i64(<vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -247,7 +223,6 @@ vuint8mf4_t test_vslide1up_vx_u8mf4(vuint8mf4_t src, uint8_t value, size_t vl) {
   return vslide1up_vx_u8mf4(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1up.nxv4i8.i8.i64(<vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -257,7 +232,6 @@ vuint8mf2_t test_vslide1up_vx_u8mf2(vuint8mf2_t src, uint8_t value, size_t vl) {
   return vslide1up_vx_u8mf2(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1up.nxv8i8.i8.i64(<vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -267,7 +241,6 @@ vuint8m1_t test_vslide1up_vx_u8m1(vuint8m1_t src, uint8_t value, size_t vl) {
   return vslide1up_vx_u8m1(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1up.nxv16i8.i8.i64(<vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -277,7 +250,6 @@ vuint8m2_t test_vslide1up_vx_u8m2(vuint8m2_t src, uint8_t value, size_t vl) {
   return vslide1up_vx_u8m2(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1up.nxv32i8.i8.i64(<vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -287,7 +259,6 @@ vuint8m4_t test_vslide1up_vx_u8m4(vuint8m4_t src, uint8_t value, size_t vl) {
   return vslide1up_vx_u8m4(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1up.nxv64i8.i8.i64(<vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -297,7 +268,6 @@ vuint8m8_t test_vslide1up_vx_u8m8(vuint8m8_t src, uint8_t value, size_t vl) {
   return vslide1up_vx_u8m8(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1up.nxv1i16.i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -308,7 +278,6 @@ vuint16mf4_t test_vslide1up_vx_u16mf4(vuint16mf4_t src, uint16_t value,
   return vslide1up_vx_u16mf4(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1up.nxv2i16.i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -319,7 +288,6 @@ vuint16mf2_t test_vslide1up_vx_u16mf2(vuint16mf2_t src, uint16_t value,
   return vslide1up_vx_u16mf2(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1up.nxv4i16.i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -330,7 +298,6 @@ vuint16m1_t test_vslide1up_vx_u16m1(vuint16m1_t src, uint16_t value,
   return vslide1up_vx_u16m1(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1up.nxv8i16.i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -341,7 +308,6 @@ vuint16m2_t test_vslide1up_vx_u16m2(vuint16m2_t src, uint16_t value,
   return vslide1up_vx_u16m2(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1up.nxv16i16.i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -352,7 +318,6 @@ vuint16m4_t test_vslide1up_vx_u16m4(vuint16m4_t src, uint16_t value,
   return vslide1up_vx_u16m4(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1up.nxv32i16.i16.i64(<vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -363,7 +328,6 @@ vuint16m8_t test_vslide1up_vx_u16m8(vuint16m8_t src, uint16_t value,
   return vslide1up_vx_u16m8(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1up.nxv1i32.i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -374,7 +338,6 @@ vuint32mf2_t test_vslide1up_vx_u32mf2(vuint32mf2_t src, uint32_t value,
   return vslide1up_vx_u32mf2(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1up.nxv2i32.i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -385,7 +348,6 @@ vuint32m1_t test_vslide1up_vx_u32m1(vuint32m1_t src, uint32_t value,
   return vslide1up_vx_u32m1(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1up.nxv4i32.i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -396,7 +358,6 @@ vuint32m2_t test_vslide1up_vx_u32m2(vuint32m2_t src, uint32_t value,
   return vslide1up_vx_u32m2(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1up.nxv8i32.i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -407,7 +368,6 @@ vuint32m4_t test_vslide1up_vx_u32m4(vuint32m4_t src, uint32_t value,
   return vslide1up_vx_u32m4(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1up.nxv16i32.i32.i64(<vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -418,7 +378,6 @@ vuint32m8_t test_vslide1up_vx_u32m8(vuint32m8_t src, uint32_t value,
   return vslide1up_vx_u32m8(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -429,7 +388,6 @@ vuint64m1_t test_vslide1up_vx_u64m1(vuint64m1_t src, uint64_t value,
   return vslide1up_vx_u64m1(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1up.nxv2i64.i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -440,7 +398,6 @@ vuint64m2_t test_vslide1up_vx_u64m2(vuint64m2_t src, uint64_t value,
   return vslide1up_vx_u64m2(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1up.nxv4i64.i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -451,7 +408,6 @@ vuint64m4_t test_vslide1up_vx_u64m4(vuint64m4_t src, uint64_t value,
   return vslide1up_vx_u64m4(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1up.nxv8i64.i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -462,7 +418,6 @@ vuint64m8_t test_vslide1up_vx_u64m8(vuint64m8_t src, uint64_t value,
   return vslide1up_vx_u64m8(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1up.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -473,7 +428,6 @@ vint8mf8_t test_vslide1up_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
   return vslide1up_vx_i8mf8_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1up.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -484,7 +438,6 @@ vint8mf4_t test_vslide1up_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
   return vslide1up_vx_i8mf4_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1up.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -495,7 +448,6 @@ vint8mf2_t test_vslide1up_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
   return vslide1up_vx_i8mf2_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1up.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -506,7 +458,6 @@ vint8m1_t test_vslide1up_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
   return vslide1up_vx_i8m1_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1up.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -517,7 +468,6 @@ vint8m2_t test_vslide1up_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
   return vslide1up_vx_i8m2_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1up.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -528,7 +478,6 @@ vint8m4_t test_vslide1up_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
   return vslide1up_vx_i8m4_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1up.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -539,7 +488,6 @@ vint8m8_t test_vslide1up_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
   return vslide1up_vx_i8m8_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1up.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -551,7 +499,6 @@ vint16mf4_t test_vslide1up_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
   return vslide1up_vx_i16mf4_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1up.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -563,7 +510,6 @@ vint16mf2_t test_vslide1up_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
   return vslide1up_vx_i16mf2_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1up.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -574,7 +520,6 @@ vint16m1_t test_vslide1up_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
   return vslide1up_vx_i16m1_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1up.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -585,7 +530,6 @@ vint16m2_t test_vslide1up_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
   return vslide1up_vx_i16m2_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1up.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -596,7 +540,6 @@ vint16m4_t test_vslide1up_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
   return vslide1up_vx_i16m4_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1up.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -607,7 +550,6 @@ vint16m8_t test_vslide1up_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
   return vslide1up_vx_i16m8_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1up.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -619,7 +561,6 @@ vint32mf2_t test_vslide1up_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
   return vslide1up_vx_i32mf2_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1up.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -630,7 +571,6 @@ vint32m1_t test_vslide1up_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
   return vslide1up_vx_i32m1_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1up.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -641,7 +581,6 @@ vint32m2_t test_vslide1up_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
   return vslide1up_vx_i32m2_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1up.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -652,7 +591,6 @@ vint32m4_t test_vslide1up_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
   return vslide1up_vx_i32m4_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1up.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -663,7 +601,6 @@ vint32m8_t test_vslide1up_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
   return vslide1up_vx_i32m8_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1up.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -674,7 +611,6 @@ vint64m1_t test_vslide1up_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
   return vslide1up_vx_i64m1_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1up.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -685,7 +621,6 @@ vint64m2_t test_vslide1up_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
   return vslide1up_vx_i64m2_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1up.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -696,7 +631,6 @@ vint64m4_t test_vslide1up_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
   return vslide1up_vx_i64m4_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1up.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -707,7 +641,6 @@ vint64m8_t test_vslide1up_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
   return vslide1up_vx_i64m8_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1up.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -719,7 +652,6 @@ vuint8mf8_t test_vslide1up_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
   return vslide1up_vx_u8mf8_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1up.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -731,7 +663,6 @@ vuint8mf4_t test_vslide1up_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
   return vslide1up_vx_u8mf4_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1up.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -743,7 +674,6 @@ vuint8mf2_t test_vslide1up_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
   return vslide1up_vx_u8mf2_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1up.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -754,7 +684,6 @@ vuint8m1_t test_vslide1up_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
   return vslide1up_vx_u8m1_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1up.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -765,7 +694,6 @@ vuint8m2_t test_vslide1up_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
   return vslide1up_vx_u8m2_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1up.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -776,7 +704,6 @@ vuint8m4_t test_vslide1up_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
   return vslide1up_vx_u8m4_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1up.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -787,7 +714,6 @@ vuint8m8_t test_vslide1up_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
   return vslide1up_vx_u8m8_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1up.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -799,7 +725,6 @@ vuint16mf4_t test_vslide1up_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
   return vslide1up_vx_u16mf4_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1up.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -811,7 +736,6 @@ vuint16mf2_t test_vslide1up_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
   return vslide1up_vx_u16mf2_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1up.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -823,7 +747,6 @@ vuint16m1_t test_vslide1up_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
   return vslide1up_vx_u16m1_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1up.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -835,7 +758,6 @@ vuint16m2_t test_vslide1up_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
   return vslide1up_vx_u16m2_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1up.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -847,7 +769,6 @@ vuint16m4_t test_vslide1up_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
   return vslide1up_vx_u16m4_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1up.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -859,7 +780,6 @@ vuint16m8_t test_vslide1up_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
   return vslide1up_vx_u16m8_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1up.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -871,7 +791,6 @@ vuint32mf2_t test_vslide1up_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
   return vslide1up_vx_u32mf2_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1up.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -883,7 +802,6 @@ vuint32m1_t test_vslide1up_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
   return vslide1up_vx_u32m1_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1up.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -895,7 +813,6 @@ vuint32m2_t test_vslide1up_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
   return vslide1up_vx_u32m2_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1up.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -907,7 +824,6 @@ vuint32m4_t test_vslide1up_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
   return vslide1up_vx_u32m4_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1up.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -919,7 +835,6 @@ vuint32m8_t test_vslide1up_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
   return vslide1up_vx_u32m8_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1up.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -931,7 +846,6 @@ vuint64m1_t test_vslide1up_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
   return vslide1up_vx_u64m1_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1up.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -943,7 +857,6 @@ vuint64m2_t test_vslide1up_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
   return vslide1up_vx_u64m2_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1up.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -955,7 +868,6 @@ vuint64m4_t test_vslide1up_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
   return vslide1up_vx_u64m4_m(mask, maskedoff, src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1up.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslidedown.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslidedown.c
index 1f19ffb61f630..e2192eab3a53d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslidedown.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslidedown.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vint8mf8_t test_vslidedown_vx_i8mf8(vint8mf8_t dst, vint8mf8_t src,
   return vslidedown_vx_i8mf8(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -27,7 +25,6 @@ vint8mf4_t test_vslidedown_vx_i8mf4(vint8mf4_t dst, vint8mf4_t src,
   return vslidedown_vx_i8mf4(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -38,7 +35,6 @@ vint8mf2_t test_vslidedown_vx_i8mf2(vint8mf2_t dst, vint8mf2_t src,
   return vslidedown_vx_i8mf2(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -49,7 +45,6 @@ vint8m1_t test_vslidedown_vx_i8m1(vint8m1_t dst, vint8m1_t src, size_t offset,
   return vslidedown_vx_i8m1(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -60,7 +55,6 @@ vint8m2_t test_vslidedown_vx_i8m2(vint8m2_t dst, vint8m2_t src, size_t offset,
   return vslidedown_vx_i8m2(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -71,7 +65,6 @@ vint8m4_t test_vslidedown_vx_i8m4(vint8m4_t dst, vint8m4_t src, size_t offset,
   return vslidedown_vx_i8m4(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -82,7 +75,6 @@ vint8m8_t test_vslidedown_vx_i8m8(vint8m8_t dst, vint8m8_t src, size_t offset,
   return vslidedown_vx_i8m8(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -93,7 +85,6 @@ vint16mf4_t test_vslidedown_vx_i16mf4(vint16mf4_t dst, vint16mf4_t src,
   return vslidedown_vx_i16mf4(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -104,7 +95,6 @@ vint16mf2_t test_vslidedown_vx_i16mf2(vint16mf2_t dst, vint16mf2_t src,
   return vslidedown_vx_i16mf2(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -115,7 +105,6 @@ vint16m1_t test_vslidedown_vx_i16m1(vint16m1_t dst, vint16m1_t src,
   return vslidedown_vx_i16m1(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -126,7 +115,6 @@ vint16m2_t test_vslidedown_vx_i16m2(vint16m2_t dst, vint16m2_t src,
   return vslidedown_vx_i16m2(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -137,7 +125,6 @@ vint16m4_t test_vslidedown_vx_i16m4(vint16m4_t dst, vint16m4_t src,
   return vslidedown_vx_i16m4(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -148,7 +135,6 @@ vint16m8_t test_vslidedown_vx_i16m8(vint16m8_t dst, vint16m8_t src,
   return vslidedown_vx_i16m8(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -159,7 +145,6 @@ vint32mf2_t test_vslidedown_vx_i32mf2(vint32mf2_t dst, vint32mf2_t src,
   return vslidedown_vx_i32mf2(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -170,7 +155,6 @@ vint32m1_t test_vslidedown_vx_i32m1(vint32m1_t dst, vint32m1_t src,
   return vslidedown_vx_i32m1(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -181,7 +165,6 @@ vint32m2_t test_vslidedown_vx_i32m2(vint32m2_t dst, vint32m2_t src,
   return vslidedown_vx_i32m2(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -192,7 +175,6 @@ vint32m4_t test_vslidedown_vx_i32m4(vint32m4_t dst, vint32m4_t src,
   return vslidedown_vx_i32m4(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -203,7 +185,6 @@ vint32m8_t test_vslidedown_vx_i32m8(vint32m8_t dst, vint32m8_t src,
   return vslidedown_vx_i32m8(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -214,7 +195,6 @@ vint64m1_t test_vslidedown_vx_i64m1(vint64m1_t dst, vint64m1_t src,
   return vslidedown_vx_i64m1(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -225,7 +205,6 @@ vint64m2_t test_vslidedown_vx_i64m2(vint64m2_t dst, vint64m2_t src,
   return vslidedown_vx_i64m2(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -236,7 +215,6 @@ vint64m4_t test_vslidedown_vx_i64m4(vint64m4_t dst, vint64m4_t src,
   return vslidedown_vx_i64m4(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -247,7 +225,6 @@ vint64m8_t test_vslidedown_vx_i64m8(vint64m8_t dst, vint64m8_t src,
   return vslidedown_vx_i64m8(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -258,7 +235,6 @@ vuint8mf8_t test_vslidedown_vx_u8mf8(vuint8mf8_t dst, vuint8mf8_t src,
   return vslidedown_vx_u8mf8(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -269,7 +245,6 @@ vuint8mf4_t test_vslidedown_vx_u8mf4(vuint8mf4_t dst, vuint8mf4_t src,
   return vslidedown_vx_u8mf4(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -280,7 +255,6 @@ vuint8mf2_t test_vslidedown_vx_u8mf2(vuint8mf2_t dst, vuint8mf2_t src,
   return vslidedown_vx_u8mf2(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -291,7 +265,6 @@ vuint8m1_t test_vslidedown_vx_u8m1(vuint8m1_t dst, vuint8m1_t src,
   return vslidedown_vx_u8m1(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -302,7 +275,6 @@ vuint8m2_t test_vslidedown_vx_u8m2(vuint8m2_t dst, vuint8m2_t src,
   return vslidedown_vx_u8m2(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -313,7 +285,6 @@ vuint8m4_t test_vslidedown_vx_u8m4(vuint8m4_t dst, vuint8m4_t src,
   return vslidedown_vx_u8m4(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -324,7 +295,6 @@ vuint8m8_t test_vslidedown_vx_u8m8(vuint8m8_t dst, vuint8m8_t src,
   return vslidedown_vx_u8m8(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -335,7 +305,6 @@ vuint16mf4_t test_vslidedown_vx_u16mf4(vuint16mf4_t dst, vuint16mf4_t src,
   return vslidedown_vx_u16mf4(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -346,7 +315,6 @@ vuint16mf2_t test_vslidedown_vx_u16mf2(vuint16mf2_t dst, vuint16mf2_t src,
   return vslidedown_vx_u16mf2(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -357,7 +325,6 @@ vuint16m1_t test_vslidedown_vx_u16m1(vuint16m1_t dst, vuint16m1_t src,
   return vslidedown_vx_u16m1(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -368,7 +335,6 @@ vuint16m2_t test_vslidedown_vx_u16m2(vuint16m2_t dst, vuint16m2_t src,
   return vslidedown_vx_u16m2(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -379,7 +345,6 @@ vuint16m4_t test_vslidedown_vx_u16m4(vuint16m4_t dst, vuint16m4_t src,
   return vslidedown_vx_u16m4(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -390,7 +355,6 @@ vuint16m8_t test_vslidedown_vx_u16m8(vuint16m8_t dst, vuint16m8_t src,
   return vslidedown_vx_u16m8(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -401,7 +365,6 @@ vuint32mf2_t test_vslidedown_vx_u32mf2(vuint32mf2_t dst, vuint32mf2_t src,
   return vslidedown_vx_u32mf2(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -412,7 +375,6 @@ vuint32m1_t test_vslidedown_vx_u32m1(vuint32m1_t dst, vuint32m1_t src,
   return vslidedown_vx_u32m1(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -423,7 +385,6 @@ vuint32m2_t test_vslidedown_vx_u32m2(vuint32m2_t dst, vuint32m2_t src,
   return vslidedown_vx_u32m2(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -434,7 +395,6 @@ vuint32m4_t test_vslidedown_vx_u32m4(vuint32m4_t dst, vuint32m4_t src,
   return vslidedown_vx_u32m4(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -445,7 +405,6 @@ vuint32m8_t test_vslidedown_vx_u32m8(vuint32m8_t dst, vuint32m8_t src,
   return vslidedown_vx_u32m8(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -456,7 +415,6 @@ vuint64m1_t test_vslidedown_vx_u64m1(vuint64m1_t dst, vuint64m1_t src,
   return vslidedown_vx_u64m1(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -467,7 +425,6 @@ vuint64m2_t test_vslidedown_vx_u64m2(vuint64m2_t dst, vuint64m2_t src,
   return vslidedown_vx_u64m2(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -478,7 +435,6 @@ vuint64m4_t test_vslidedown_vx_u64m4(vuint64m4_t dst, vuint64m4_t src,
   return vslidedown_vx_u64m4(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -489,7 +445,6 @@ vuint64m8_t test_vslidedown_vx_u64m8(vuint64m8_t dst, vuint64m8_t src,
   return vslidedown_vx_u64m8(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32.i64(<vscale x 1 x float> [[DST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -500,7 +455,6 @@ vfloat32mf2_t test_vslidedown_vx_f32mf2(vfloat32mf2_t dst, vfloat32mf2_t src,
   return vslidedown_vx_f32mf2(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -511,7 +465,6 @@ vfloat32m1_t test_vslidedown_vx_f32m1(vfloat32m1_t dst, vfloat32m1_t src,
   return vslidedown_vx_f32m1(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32.i64(<vscale x 4 x float> [[DST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -522,7 +475,6 @@ vfloat32m2_t test_vslidedown_vx_f32m2(vfloat32m2_t dst, vfloat32m2_t src,
   return vslidedown_vx_f32m2(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32.i64(<vscale x 8 x float> [[DST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -533,7 +485,6 @@ vfloat32m4_t test_vslidedown_vx_f32m4(vfloat32m4_t dst, vfloat32m4_t src,
   return vslidedown_vx_f32m4(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslidedown.nxv16f32.i64(<vscale x 16 x float> [[DST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -544,7 +495,6 @@ vfloat32m8_t test_vslidedown_vx_f32m8(vfloat32m8_t dst, vfloat32m8_t src,
   return vslidedown_vx_f32m8(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslidedown.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -555,7 +505,6 @@ vfloat64m1_t test_vslidedown_vx_f64m1(vfloat64m1_t dst, vfloat64m1_t src,
   return vslidedown_vx_f64m1(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslidedown.nxv2f64.i64(<vscale x 2 x double> [[DST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -566,7 +515,6 @@ vfloat64m2_t test_vslidedown_vx_f64m2(vfloat64m2_t dst, vfloat64m2_t src,
   return vslidedown_vx_f64m2(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslidedown.nxv4f64.i64(<vscale x 4 x double> [[DST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -577,7 +525,6 @@ vfloat64m4_t test_vslidedown_vx_f64m4(vfloat64m4_t dst, vfloat64m4_t src,
   return vslidedown_vx_f64m4(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslidedown.nxv8f64.i64(<vscale x 8 x double> [[DST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -588,7 +535,6 @@ vfloat64m8_t test_vslidedown_vx_f64m8(vfloat64m8_t dst, vfloat64m8_t src,
   return vslidedown_vx_f64m8(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -600,7 +546,6 @@ vint8mf8_t test_vslidedown_vx_i8mf8_m(vbool64_t mask, vint8mf8_t dst,
   return vslidedown_vx_i8mf8_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -612,7 +557,6 @@ vint8mf4_t test_vslidedown_vx_i8mf4_m(vbool32_t mask, vint8mf4_t dst,
   return vslidedown_vx_i8mf4_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -624,7 +568,6 @@ vint8mf2_t test_vslidedown_vx_i8mf2_m(vbool16_t mask, vint8mf2_t dst,
   return vslidedown_vx_i8mf2_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -635,7 +578,6 @@ vint8m1_t test_vslidedown_vx_i8m1_m(vbool8_t mask, vint8m1_t dst, vint8m1_t src,
   return vslidedown_vx_i8m1_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -646,7 +588,6 @@ vint8m2_t test_vslidedown_vx_i8m2_m(vbool4_t mask, vint8m2_t dst, vint8m2_t src,
   return vslidedown_vx_i8m2_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -657,7 +598,6 @@ vint8m4_t test_vslidedown_vx_i8m4_m(vbool2_t mask, vint8m4_t dst, vint8m4_t src,
   return vslidedown_vx_i8m4_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.mask.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -668,7 +608,6 @@ vint8m8_t test_vslidedown_vx_i8m8_m(vbool1_t mask, vint8m8_t dst, vint8m8_t src,
   return vslidedown_vx_i8m8_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -680,7 +619,6 @@ vint16mf4_t test_vslidedown_vx_i16mf4_m(vbool64_t mask, vint16mf4_t dst,
   return vslidedown_vx_i16mf4_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -692,7 +630,6 @@ vint16mf2_t test_vslidedown_vx_i16mf2_m(vbool32_t mask, vint16mf2_t dst,
   return vslidedown_vx_i16mf2_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -704,7 +641,6 @@ vint16m1_t test_vslidedown_vx_i16m1_m(vbool16_t mask, vint16m1_t dst,
   return vslidedown_vx_i16m1_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -716,7 +652,6 @@ vint16m2_t test_vslidedown_vx_i16m2_m(vbool8_t mask, vint16m2_t dst,
   return vslidedown_vx_i16m2_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -728,7 +663,6 @@ vint16m4_t test_vslidedown_vx_i16m4_m(vbool4_t mask, vint16m4_t dst,
   return vslidedown_vx_i16m4_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.mask.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -740,7 +674,6 @@ vint16m8_t test_vslidedown_vx_i16m8_m(vbool2_t mask, vint16m8_t dst,
   return vslidedown_vx_i16m8_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -752,7 +685,6 @@ vint32mf2_t test_vslidedown_vx_i32mf2_m(vbool64_t mask, vint32mf2_t dst,
   return vslidedown_vx_i32mf2_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -764,7 +696,6 @@ vint32m1_t test_vslidedown_vx_i32m1_m(vbool32_t mask, vint32m1_t dst,
   return vslidedown_vx_i32m1_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -776,7 +707,6 @@ vint32m2_t test_vslidedown_vx_i32m2_m(vbool16_t mask, vint32m2_t dst,
   return vslidedown_vx_i32m2_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -788,7 +718,6 @@ vint32m4_t test_vslidedown_vx_i32m4_m(vbool8_t mask, vint32m4_t dst,
   return vslidedown_vx_i32m4_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.mask.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -800,7 +729,6 @@ vint32m8_t test_vslidedown_vx_i32m8_m(vbool4_t mask, vint32m8_t dst,
   return vslidedown_vx_i32m8_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -812,7 +740,6 @@ vint64m1_t test_vslidedown_vx_i64m1_m(vbool64_t mask, vint64m1_t dst,
   return vslidedown_vx_i64m1_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -824,7 +751,6 @@ vint64m2_t test_vslidedown_vx_i64m2_m(vbool32_t mask, vint64m2_t dst,
   return vslidedown_vx_i64m2_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -836,7 +762,6 @@ vint64m4_t test_vslidedown_vx_i64m4_m(vbool16_t mask, vint64m4_t dst,
   return vslidedown_vx_i64m4_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.mask.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -848,7 +773,6 @@ vint64m8_t test_vslidedown_vx_i64m8_m(vbool8_t mask, vint64m8_t dst,
   return vslidedown_vx_i64m8_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -860,7 +784,6 @@ vuint8mf8_t test_vslidedown_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t dst,
   return vslidedown_vx_u8mf8_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -872,7 +795,6 @@ vuint8mf4_t test_vslidedown_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t dst,
   return vslidedown_vx_u8mf4_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -884,7 +806,6 @@ vuint8mf2_t test_vslidedown_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t dst,
   return vslidedown_vx_u8mf2_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -895,7 +816,6 @@ vuint8m1_t test_vslidedown_vx_u8m1_m(vbool8_t mask, vuint8m1_t dst,
   return vslidedown_vx_u8m1_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -906,7 +826,6 @@ vuint8m2_t test_vslidedown_vx_u8m2_m(vbool4_t mask, vuint8m2_t dst,
   return vslidedown_vx_u8m2_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -917,7 +836,6 @@ vuint8m4_t test_vslidedown_vx_u8m4_m(vbool2_t mask, vuint8m4_t dst,
   return vslidedown_vx_u8m4_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.mask.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -928,7 +846,6 @@ vuint8m8_t test_vslidedown_vx_u8m8_m(vbool1_t mask, vuint8m8_t dst,
   return vslidedown_vx_u8m8_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -940,7 +857,6 @@ vuint16mf4_t test_vslidedown_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t dst,
   return vslidedown_vx_u16mf4_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -952,7 +868,6 @@ vuint16mf2_t test_vslidedown_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t dst,
   return vslidedown_vx_u16mf2_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -964,7 +879,6 @@ vuint16m1_t test_vslidedown_vx_u16m1_m(vbool16_t mask, vuint16m1_t dst,
   return vslidedown_vx_u16m1_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -976,7 +890,6 @@ vuint16m2_t test_vslidedown_vx_u16m2_m(vbool8_t mask, vuint16m2_t dst,
   return vslidedown_vx_u16m2_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -988,7 +901,6 @@ vuint16m4_t test_vslidedown_vx_u16m4_m(vbool4_t mask, vuint16m4_t dst,
   return vslidedown_vx_u16m4_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.mask.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1000,7 +912,6 @@ vuint16m8_t test_vslidedown_vx_u16m8_m(vbool2_t mask, vuint16m8_t dst,
   return vslidedown_vx_u16m8_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1012,7 +923,6 @@ vuint32mf2_t test_vslidedown_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t dst,
   return vslidedown_vx_u32mf2_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1024,7 +934,6 @@ vuint32m1_t test_vslidedown_vx_u32m1_m(vbool32_t mask, vuint32m1_t dst,
   return vslidedown_vx_u32m1_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1036,7 +945,6 @@ vuint32m2_t test_vslidedown_vx_u32m2_m(vbool16_t mask, vuint32m2_t dst,
   return vslidedown_vx_u32m2_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1048,7 +956,6 @@ vuint32m4_t test_vslidedown_vx_u32m4_m(vbool8_t mask, vuint32m4_t dst,
   return vslidedown_vx_u32m4_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.mask.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1060,7 +967,6 @@ vuint32m8_t test_vslidedown_vx_u32m8_m(vbool4_t mask, vuint32m8_t dst,
   return vslidedown_vx_u32m8_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1072,7 +978,6 @@ vuint64m1_t test_vslidedown_vx_u64m1_m(vbool64_t mask, vuint64m1_t dst,
   return vslidedown_vx_u64m1_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1084,7 +989,6 @@ vuint64m2_t test_vslidedown_vx_u64m2_m(vbool32_t mask, vuint64m2_t dst,
   return vslidedown_vx_u64m2_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1096,7 +1000,6 @@ vuint64m4_t test_vslidedown_vx_u64m4_m(vbool16_t mask, vuint64m4_t dst,
   return vslidedown_vx_u64m4_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.mask.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1108,7 +1011,6 @@ vuint64m8_t test_vslidedown_vx_u64m8_m(vbool8_t mask, vuint64m8_t dst,
   return vslidedown_vx_u64m8_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslidedown.mask.nxv1f32.i64(<vscale x 1 x float> [[DST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1120,7 +1022,6 @@ vfloat32mf2_t test_vslidedown_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t dst,
   return vslidedown_vx_f32mf2_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslidedown.mask.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1132,7 +1033,6 @@ vfloat32m1_t test_vslidedown_vx_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
   return vslidedown_vx_f32m1_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslidedown.mask.nxv4f32.i64(<vscale x 4 x float> [[DST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1144,7 +1044,6 @@ vfloat32m2_t test_vslidedown_vx_f32m2_m(vbool16_t mask, vfloat32m2_t dst,
   return vslidedown_vx_f32m2_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslidedown.mask.nxv8f32.i64(<vscale x 8 x float> [[DST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1156,7 +1055,6 @@ vfloat32m4_t test_vslidedown_vx_f32m4_m(vbool8_t mask, vfloat32m4_t dst,
   return vslidedown_vx_f32m4_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslidedown.mask.nxv16f32.i64(<vscale x 16 x float> [[DST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1168,7 +1066,6 @@ vfloat32m8_t test_vslidedown_vx_f32m8_m(vbool4_t mask, vfloat32m8_t dst,
   return vslidedown_vx_f32m8_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslidedown.mask.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1180,7 +1077,6 @@ vfloat64m1_t test_vslidedown_vx_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
   return vslidedown_vx_f64m1_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslidedown.mask.nxv2f64.i64(<vscale x 2 x double> [[DST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1192,7 +1088,6 @@ vfloat64m2_t test_vslidedown_vx_f64m2_m(vbool32_t mask, vfloat64m2_t dst,
   return vslidedown_vx_f64m2_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslidedown.mask.nxv4f64.i64(<vscale x 4 x double> [[DST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1204,7 +1099,6 @@ vfloat64m4_t test_vslidedown_vx_f64m4_m(vbool16_t mask, vfloat64m4_t dst,
   return vslidedown_vx_f64m4_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslidedown.mask.nxv8f64.i64(<vscale x 8 x double> [[DST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslideup.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslideup.c
index 01d7a9119f69f..e9757b67a3b4c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslideup.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslideup.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vint8mf8_t test_vslideup_vx_i8mf8(vint8mf8_t dst, vint8mf8_t src, size_t offset,
   return vslideup_vx_i8mf8(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -27,7 +25,6 @@ vint8mf4_t test_vslideup_vx_i8mf4(vint8mf4_t dst, vint8mf4_t src, size_t offset,
   return vslideup_vx_i8mf4(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -38,7 +35,6 @@ vint8mf2_t test_vslideup_vx_i8mf2(vint8mf2_t dst, vint8mf2_t src, size_t offset,
   return vslideup_vx_i8mf2(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -49,7 +45,6 @@ vint8m1_t test_vslideup_vx_i8m1(vint8m1_t dst, vint8m1_t src, size_t offset,
   return vslideup_vx_i8m1(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -60,7 +55,6 @@ vint8m2_t test_vslideup_vx_i8m2(vint8m2_t dst, vint8m2_t src, size_t offset,
   return vslideup_vx_i8m2(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -71,7 +65,6 @@ vint8m4_t test_vslideup_vx_i8m4(vint8m4_t dst, vint8m4_t src, size_t offset,
   return vslideup_vx_i8m4(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -82,7 +75,6 @@ vint8m8_t test_vslideup_vx_i8m8(vint8m8_t dst, vint8m8_t src, size_t offset,
   return vslideup_vx_i8m8(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -93,7 +85,6 @@ vint16mf4_t test_vslideup_vx_i16mf4(vint16mf4_t dst, vint16mf4_t src,
   return vslideup_vx_i16mf4(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -104,7 +95,6 @@ vint16mf2_t test_vslideup_vx_i16mf2(vint16mf2_t dst, vint16mf2_t src,
   return vslideup_vx_i16mf2(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -115,7 +105,6 @@ vint16m1_t test_vslideup_vx_i16m1(vint16m1_t dst, vint16m1_t src, size_t offset,
   return vslideup_vx_i16m1(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -126,7 +115,6 @@ vint16m2_t test_vslideup_vx_i16m2(vint16m2_t dst, vint16m2_t src, size_t offset,
   return vslideup_vx_i16m2(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -137,7 +125,6 @@ vint16m4_t test_vslideup_vx_i16m4(vint16m4_t dst, vint16m4_t src, size_t offset,
   return vslideup_vx_i16m4(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -148,7 +135,6 @@ vint16m8_t test_vslideup_vx_i16m8(vint16m8_t dst, vint16m8_t src, size_t offset,
   return vslideup_vx_i16m8(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -159,7 +145,6 @@ vint32mf2_t test_vslideup_vx_i32mf2(vint32mf2_t dst, vint32mf2_t src,
   return vslideup_vx_i32mf2(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -170,7 +155,6 @@ vint32m1_t test_vslideup_vx_i32m1(vint32m1_t dst, vint32m1_t src, size_t offset,
   return vslideup_vx_i32m1(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -181,7 +165,6 @@ vint32m2_t test_vslideup_vx_i32m2(vint32m2_t dst, vint32m2_t src, size_t offset,
   return vslideup_vx_i32m2(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -192,7 +175,6 @@ vint32m4_t test_vslideup_vx_i32m4(vint32m4_t dst, vint32m4_t src, size_t offset,
   return vslideup_vx_i32m4(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -203,7 +185,6 @@ vint32m8_t test_vslideup_vx_i32m8(vint32m8_t dst, vint32m8_t src, size_t offset,
   return vslideup_vx_i32m8(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -214,7 +195,6 @@ vint64m1_t test_vslideup_vx_i64m1(vint64m1_t dst, vint64m1_t src, size_t offset,
   return vslideup_vx_i64m1(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -225,7 +205,6 @@ vint64m2_t test_vslideup_vx_i64m2(vint64m2_t dst, vint64m2_t src, size_t offset,
   return vslideup_vx_i64m2(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -236,7 +215,6 @@ vint64m4_t test_vslideup_vx_i64m4(vint64m4_t dst, vint64m4_t src, size_t offset,
   return vslideup_vx_i64m4(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -247,7 +225,6 @@ vint64m8_t test_vslideup_vx_i64m8(vint64m8_t dst, vint64m8_t src, size_t offset,
   return vslideup_vx_i64m8(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -258,7 +235,6 @@ vuint8mf8_t test_vslideup_vx_u8mf8(vuint8mf8_t dst, vuint8mf8_t src,
   return vslideup_vx_u8mf8(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -269,7 +245,6 @@ vuint8mf4_t test_vslideup_vx_u8mf4(vuint8mf4_t dst, vuint8mf4_t src,
   return vslideup_vx_u8mf4(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -280,7 +255,6 @@ vuint8mf2_t test_vslideup_vx_u8mf2(vuint8mf2_t dst, vuint8mf2_t src,
   return vslideup_vx_u8mf2(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -291,7 +265,6 @@ vuint8m1_t test_vslideup_vx_u8m1(vuint8m1_t dst, vuint8m1_t src, size_t offset,
   return vslideup_vx_u8m1(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -302,7 +275,6 @@ vuint8m2_t test_vslideup_vx_u8m2(vuint8m2_t dst, vuint8m2_t src, size_t offset,
   return vslideup_vx_u8m2(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -313,7 +285,6 @@ vuint8m4_t test_vslideup_vx_u8m4(vuint8m4_t dst, vuint8m4_t src, size_t offset,
   return vslideup_vx_u8m4(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -324,7 +295,6 @@ vuint8m8_t test_vslideup_vx_u8m8(vuint8m8_t dst, vuint8m8_t src, size_t offset,
   return vslideup_vx_u8m8(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -335,7 +305,6 @@ vuint16mf4_t test_vslideup_vx_u16mf4(vuint16mf4_t dst, vuint16mf4_t src,
   return vslideup_vx_u16mf4(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -346,7 +315,6 @@ vuint16mf2_t test_vslideup_vx_u16mf2(vuint16mf2_t dst, vuint16mf2_t src,
   return vslideup_vx_u16mf2(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -357,7 +325,6 @@ vuint16m1_t test_vslideup_vx_u16m1(vuint16m1_t dst, vuint16m1_t src,
   return vslideup_vx_u16m1(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -368,7 +335,6 @@ vuint16m2_t test_vslideup_vx_u16m2(vuint16m2_t dst, vuint16m2_t src,
   return vslideup_vx_u16m2(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -379,7 +345,6 @@ vuint16m4_t test_vslideup_vx_u16m4(vuint16m4_t dst, vuint16m4_t src,
   return vslideup_vx_u16m4(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -390,7 +355,6 @@ vuint16m8_t test_vslideup_vx_u16m8(vuint16m8_t dst, vuint16m8_t src,
   return vslideup_vx_u16m8(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -401,7 +365,6 @@ vuint32mf2_t test_vslideup_vx_u32mf2(vuint32mf2_t dst, vuint32mf2_t src,
   return vslideup_vx_u32mf2(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -412,7 +375,6 @@ vuint32m1_t test_vslideup_vx_u32m1(vuint32m1_t dst, vuint32m1_t src,
   return vslideup_vx_u32m1(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -423,7 +385,6 @@ vuint32m2_t test_vslideup_vx_u32m2(vuint32m2_t dst, vuint32m2_t src,
   return vslideup_vx_u32m2(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -434,7 +395,6 @@ vuint32m4_t test_vslideup_vx_u32m4(vuint32m4_t dst, vuint32m4_t src,
   return vslideup_vx_u32m4(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -445,7 +405,6 @@ vuint32m8_t test_vslideup_vx_u32m8(vuint32m8_t dst, vuint32m8_t src,
   return vslideup_vx_u32m8(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -456,7 +415,6 @@ vuint64m1_t test_vslideup_vx_u64m1(vuint64m1_t dst, vuint64m1_t src,
   return vslideup_vx_u64m1(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -467,7 +425,6 @@ vuint64m2_t test_vslideup_vx_u64m2(vuint64m2_t dst, vuint64m2_t src,
   return vslideup_vx_u64m2(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -478,7 +435,6 @@ vuint64m4_t test_vslideup_vx_u64m4(vuint64m4_t dst, vuint64m4_t src,
   return vslideup_vx_u64m4(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -489,7 +445,6 @@ vuint64m8_t test_vslideup_vx_u64m8(vuint64m8_t dst, vuint64m8_t src,
   return vslideup_vx_u64m8(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslideup.nxv1f32.i64(<vscale x 1 x float> [[DST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -500,7 +455,6 @@ vfloat32mf2_t test_vslideup_vx_f32mf2(vfloat32mf2_t dst, vfloat32mf2_t src,
   return vslideup_vx_f32mf2(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslideup.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -511,7 +465,6 @@ vfloat32m1_t test_vslideup_vx_f32m1(vfloat32m1_t dst, vfloat32m1_t src,
   return vslideup_vx_f32m1(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslideup.nxv4f32.i64(<vscale x 4 x float> [[DST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -522,7 +475,6 @@ vfloat32m2_t test_vslideup_vx_f32m2(vfloat32m2_t dst, vfloat32m2_t src,
   return vslideup_vx_f32m2(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslideup.nxv8f32.i64(<vscale x 8 x float> [[DST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -533,7 +485,6 @@ vfloat32m4_t test_vslideup_vx_f32m4(vfloat32m4_t dst, vfloat32m4_t src,
   return vslideup_vx_f32m4(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslideup.nxv16f32.i64(<vscale x 16 x float> [[DST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -544,7 +495,6 @@ vfloat32m8_t test_vslideup_vx_f32m8(vfloat32m8_t dst, vfloat32m8_t src,
   return vslideup_vx_f32m8(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslideup.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -555,7 +505,6 @@ vfloat64m1_t test_vslideup_vx_f64m1(vfloat64m1_t dst, vfloat64m1_t src,
   return vslideup_vx_f64m1(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslideup.nxv2f64.i64(<vscale x 2 x double> [[DST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -566,7 +515,6 @@ vfloat64m2_t test_vslideup_vx_f64m2(vfloat64m2_t dst, vfloat64m2_t src,
   return vslideup_vx_f64m2(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslideup.nxv4f64.i64(<vscale x 4 x double> [[DST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -577,7 +525,6 @@ vfloat64m4_t test_vslideup_vx_f64m4(vfloat64m4_t dst, vfloat64m4_t src,
   return vslideup_vx_f64m4(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslideup.nxv8f64.i64(<vscale x 8 x double> [[DST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -588,7 +535,6 @@ vfloat64m8_t test_vslideup_vx_f64m8(vfloat64m8_t dst, vfloat64m8_t src,
   return vslideup_vx_f64m8(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -599,7 +545,6 @@ vint8mf8_t test_vslideup_vx_i8mf8_m(vbool64_t mask, vint8mf8_t dst,
   return vslideup_vx_i8mf8_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.mask.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -610,7 +555,6 @@ vint8mf4_t test_vslideup_vx_i8mf4_m(vbool32_t mask, vint8mf4_t dst,
   return vslideup_vx_i8mf4_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.mask.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -621,7 +565,6 @@ vint8mf2_t test_vslideup_vx_i8mf2_m(vbool16_t mask, vint8mf2_t dst,
   return vslideup_vx_i8mf2_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.mask.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -632,7 +575,6 @@ vint8m1_t test_vslideup_vx_i8m1_m(vbool8_t mask, vint8m1_t dst, vint8m1_t src,
   return vslideup_vx_i8m1_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.mask.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -643,7 +585,6 @@ vint8m2_t test_vslideup_vx_i8m2_m(vbool4_t mask, vint8m2_t dst, vint8m2_t src,
   return vslideup_vx_i8m2_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.mask.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -654,7 +595,6 @@ vint8m4_t test_vslideup_vx_i8m4_m(vbool2_t mask, vint8m4_t dst, vint8m4_t src,
   return vslideup_vx_i8m4_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.mask.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -665,7 +605,6 @@ vint8m8_t test_vslideup_vx_i8m8_m(vbool1_t mask, vint8m8_t dst, vint8m8_t src,
   return vslideup_vx_i8m8_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.mask.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -677,7 +616,6 @@ vint16mf4_t test_vslideup_vx_i16mf4_m(vbool64_t mask, vint16mf4_t dst,
   return vslideup_vx_i16mf4_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.mask.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -689,7 +627,6 @@ vint16mf2_t test_vslideup_vx_i16mf2_m(vbool32_t mask, vint16mf2_t dst,
   return vslideup_vx_i16mf2_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.mask.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -700,7 +637,6 @@ vint16m1_t test_vslideup_vx_i16m1_m(vbool16_t mask, vint16m1_t dst,
   return vslideup_vx_i16m1_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.mask.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -711,7 +647,6 @@ vint16m2_t test_vslideup_vx_i16m2_m(vbool8_t mask, vint16m2_t dst,
   return vslideup_vx_i16m2_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.mask.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -722,7 +657,6 @@ vint16m4_t test_vslideup_vx_i16m4_m(vbool4_t mask, vint16m4_t dst,
   return vslideup_vx_i16m4_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.mask.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -733,7 +667,6 @@ vint16m8_t test_vslideup_vx_i16m8_m(vbool2_t mask, vint16m8_t dst,
   return vslideup_vx_i16m8_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.mask.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -745,7 +678,6 @@ vint32mf2_t test_vslideup_vx_i32mf2_m(vbool64_t mask, vint32mf2_t dst,
   return vslideup_vx_i32mf2_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -756,7 +688,6 @@ vint32m1_t test_vslideup_vx_i32m1_m(vbool32_t mask, vint32m1_t dst,
   return vslideup_vx_i32m1_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.mask.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -767,7 +698,6 @@ vint32m2_t test_vslideup_vx_i32m2_m(vbool16_t mask, vint32m2_t dst,
   return vslideup_vx_i32m2_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.mask.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -778,7 +708,6 @@ vint32m4_t test_vslideup_vx_i32m4_m(vbool8_t mask, vint32m4_t dst,
   return vslideup_vx_i32m4_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.mask.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -789,7 +718,6 @@ vint32m8_t test_vslideup_vx_i32m8_m(vbool4_t mask, vint32m8_t dst,
   return vslideup_vx_i32m8_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.mask.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -800,7 +728,6 @@ vint64m1_t test_vslideup_vx_i64m1_m(vbool64_t mask, vint64m1_t dst,
   return vslideup_vx_i64m1_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.mask.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -811,7 +738,6 @@ vint64m2_t test_vslideup_vx_i64m2_m(vbool32_t mask, vint64m2_t dst,
   return vslideup_vx_i64m2_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.mask.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -822,7 +748,6 @@ vint64m4_t test_vslideup_vx_i64m4_m(vbool16_t mask, vint64m4_t dst,
   return vslideup_vx_i64m4_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.mask.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -833,7 +758,6 @@ vint64m8_t test_vslideup_vx_i64m8_m(vbool8_t mask, vint64m8_t dst,
   return vslideup_vx_i64m8_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -845,7 +769,6 @@ vuint8mf8_t test_vslideup_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t dst,
   return vslideup_vx_u8mf8_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.mask.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -857,7 +780,6 @@ vuint8mf4_t test_vslideup_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t dst,
   return vslideup_vx_u8mf4_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.mask.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -869,7 +791,6 @@ vuint8mf2_t test_vslideup_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t dst,
   return vslideup_vx_u8mf2_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.mask.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -880,7 +801,6 @@ vuint8m1_t test_vslideup_vx_u8m1_m(vbool8_t mask, vuint8m1_t dst,
   return vslideup_vx_u8m1_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.mask.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -891,7 +811,6 @@ vuint8m2_t test_vslideup_vx_u8m2_m(vbool4_t mask, vuint8m2_t dst,
   return vslideup_vx_u8m2_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.mask.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -902,7 +821,6 @@ vuint8m4_t test_vslideup_vx_u8m4_m(vbool2_t mask, vuint8m4_t dst,
   return vslideup_vx_u8m4_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.mask.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -913,7 +831,6 @@ vuint8m8_t test_vslideup_vx_u8m8_m(vbool1_t mask, vuint8m8_t dst,
   return vslideup_vx_u8m8_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.mask.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -925,7 +842,6 @@ vuint16mf4_t test_vslideup_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t dst,
   return vslideup_vx_u16mf4_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.mask.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -937,7 +853,6 @@ vuint16mf2_t test_vslideup_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t dst,
   return vslideup_vx_u16mf2_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.mask.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -949,7 +864,6 @@ vuint16m1_t test_vslideup_vx_u16m1_m(vbool16_t mask, vuint16m1_t dst,
   return vslideup_vx_u16m1_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.mask.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -961,7 +875,6 @@ vuint16m2_t test_vslideup_vx_u16m2_m(vbool8_t mask, vuint16m2_t dst,
   return vslideup_vx_u16m2_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.mask.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -973,7 +886,6 @@ vuint16m4_t test_vslideup_vx_u16m4_m(vbool4_t mask, vuint16m4_t dst,
   return vslideup_vx_u16m4_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.mask.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -985,7 +897,6 @@ vuint16m8_t test_vslideup_vx_u16m8_m(vbool2_t mask, vuint16m8_t dst,
   return vslideup_vx_u16m8_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.mask.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -997,7 +908,6 @@ vuint32mf2_t test_vslideup_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t dst,
   return vslideup_vx_u32mf2_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1009,7 +919,6 @@ vuint32m1_t test_vslideup_vx_u32m1_m(vbool32_t mask, vuint32m1_t dst,
   return vslideup_vx_u32m1_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.mask.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1021,7 +930,6 @@ vuint32m2_t test_vslideup_vx_u32m2_m(vbool16_t mask, vuint32m2_t dst,
   return vslideup_vx_u32m2_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.mask.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1033,7 +941,6 @@ vuint32m4_t test_vslideup_vx_u32m4_m(vbool8_t mask, vuint32m4_t dst,
   return vslideup_vx_u32m4_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.mask.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1045,7 +952,6 @@ vuint32m8_t test_vslideup_vx_u32m8_m(vbool4_t mask, vuint32m8_t dst,
   return vslideup_vx_u32m8_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.mask.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1057,7 +963,6 @@ vuint64m1_t test_vslideup_vx_u64m1_m(vbool64_t mask, vuint64m1_t dst,
   return vslideup_vx_u64m1_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.mask.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1069,7 +974,6 @@ vuint64m2_t test_vslideup_vx_u64m2_m(vbool32_t mask, vuint64m2_t dst,
   return vslideup_vx_u64m2_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.mask.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1081,7 +985,6 @@ vuint64m4_t test_vslideup_vx_u64m4_m(vbool16_t mask, vuint64m4_t dst,
   return vslideup_vx_u64m4_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.mask.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1093,7 +996,6 @@ vuint64m8_t test_vslideup_vx_u64m8_m(vbool8_t mask, vuint64m8_t dst,
   return vslideup_vx_u64m8_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslideup.mask.nxv1f32.i64(<vscale x 1 x float> [[DST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1105,7 +1007,6 @@ vfloat32mf2_t test_vslideup_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t dst,
   return vslideup_vx_f32mf2_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslideup.mask.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1117,7 +1018,6 @@ vfloat32m1_t test_vslideup_vx_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
   return vslideup_vx_f32m1_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslideup.mask.nxv4f32.i64(<vscale x 4 x float> [[DST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1129,7 +1029,6 @@ vfloat32m2_t test_vslideup_vx_f32m2_m(vbool16_t mask, vfloat32m2_t dst,
   return vslideup_vx_f32m2_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslideup.mask.nxv8f32.i64(<vscale x 8 x float> [[DST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1141,7 +1040,6 @@ vfloat32m4_t test_vslideup_vx_f32m4_m(vbool8_t mask, vfloat32m4_t dst,
   return vslideup_vx_f32m4_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslideup.mask.nxv16f32.i64(<vscale x 16 x float> [[DST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1153,7 +1051,6 @@ vfloat32m8_t test_vslideup_vx_f32m8_m(vbool4_t mask, vfloat32m8_t dst,
   return vslideup_vx_f32m8_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslideup.mask.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1165,7 +1062,6 @@ vfloat64m1_t test_vslideup_vx_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
   return vslideup_vx_f64m1_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslideup.mask.nxv2f64.i64(<vscale x 2 x double> [[DST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1177,7 +1073,6 @@ vfloat64m2_t test_vslideup_vx_f64m2_m(vbool32_t mask, vfloat64m2_t dst,
   return vslideup_vx_f64m2_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslideup.mask.nxv4f64.i64(<vscale x 4 x double> [[DST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1189,7 +1084,6 @@ vfloat64m4_t test_vslideup_vx_f64m4_m(vbool16_t mask, vfloat64m4_t dst,
   return vslideup_vx_f64m4_m(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslideup.mask.nxv8f64.i64(<vscale x 8 x double> [[DST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsll.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsll.c
index 09571dad18752..ae4465ac3c685 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsll.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsll.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint8mf8_t test_vsll_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
   return vsll_vv_i8mf8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.i64.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint8mf8_t test_vsll_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) {
   return vsll_vx_i8mf8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint8mf4_t test_vsll_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
   return vsll_vv_i8mf4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.i64.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint8mf4_t test_vsll_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
   return vsll_vx_i8mf4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint8mf2_t test_vsll_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
   return vsll_vv_i8mf2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.i64.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint8mf2_t test_vsll_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) {
   return vsll_vx_i8mf2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vint8m1_t test_vsll_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) {
   return vsll_vv_i8m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.i64.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vint8m1_t test_vsll_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) {
   return vsll_vx_i8m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vint8m2_t test_vsll_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) {
   return vsll_vv_i8m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.i64.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vint8m2_t test_vsll_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) {
   return vsll_vx_i8m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vint8m4_t test_vsll_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) {
   return vsll_vv_i8m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.i64.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vint8m4_t test_vsll_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) {
   return vsll_vx_i8m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vint8m8_t test_vsll_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) {
   return vsll_vv_i8m8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.i64.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vint8m8_t test_vsll_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) {
   return vsll_vx_i8m8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vint16mf4_t test_vsll_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl)
   return vsll_vv_i16mf4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vint16mf4_t test_vsll_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) {
   return vsll_vx_i16mf4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vint16mf2_t test_vsll_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl)
   return vsll_vv_i16mf2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vint16mf2_t test_vsll_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) {
   return vsll_vx_i16mf2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vint16m1_t test_vsll_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) {
   return vsll_vv_i16m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vint16m1_t test_vsll_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) {
   return vsll_vx_i16m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vint16m2_t test_vsll_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) {
   return vsll_vv_i16m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vint16m2_t test_vsll_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) {
   return vsll_vx_i16m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vint16m4_t test_vsll_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) {
   return vsll_vv_i16m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vint16m4_t test_vsll_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) {
   return vsll_vx_i16m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vint16m8_t test_vsll_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) {
   return vsll_vv_i16m8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vint16m8_t test_vsll_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) {
   return vsll_vx_i16m8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vint32mf2_t test_vsll_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl)
   return vsll_vv_i32mf2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vint32mf2_t test_vsll_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) {
   return vsll_vx_i32mf2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vint32m1_t test_vsll_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) {
   return vsll_vv_i32m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -304,7 +274,6 @@ vint32m1_t test_vsll_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) {
   return vsll_vx_i32m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -314,7 +283,6 @@ vint32m2_t test_vsll_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) {
   return vsll_vv_i32m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -324,7 +292,6 @@ vint32m2_t test_vsll_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) {
   return vsll_vx_i32m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -334,7 +301,6 @@ vint32m4_t test_vsll_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) {
   return vsll_vv_i32m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -344,7 +310,6 @@ vint32m4_t test_vsll_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) {
   return vsll_vx_i32m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -354,7 +319,6 @@ vint32m8_t test_vsll_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) {
   return vsll_vv_i32m8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -364,7 +328,6 @@ vint32m8_t test_vsll_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) {
   return vsll_vx_i32m8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -374,7 +337,6 @@ vint64m1_t test_vsll_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) {
   return vsll_vv_i64m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -384,7 +346,6 @@ vint64m1_t test_vsll_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) {
   return vsll_vx_i64m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -394,7 +355,6 @@ vint64m2_t test_vsll_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) {
   return vsll_vv_i64m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -404,7 +364,6 @@ vint64m2_t test_vsll_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) {
   return vsll_vx_i64m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -414,7 +373,6 @@ vint64m4_t test_vsll_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
   return vsll_vv_i64m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -424,7 +382,6 @@ vint64m4_t test_vsll_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
   return vsll_vx_i64m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -434,7 +391,6 @@ vint64m8_t test_vsll_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
   return vsll_vv_i64m8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -444,7 +400,6 @@ vint64m8_t test_vsll_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) {
   return vsll_vx_i64m8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -454,7 +409,6 @@ vuint8mf8_t test_vsll_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
   return vsll_vv_u8mf8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.i64.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -464,7 +418,6 @@ vuint8mf8_t test_vsll_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
   return vsll_vx_u8mf8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -474,7 +427,6 @@ vuint8mf4_t test_vsll_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
   return vsll_vv_u8mf4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.i64.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -484,7 +436,6 @@ vuint8mf4_t test_vsll_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
   return vsll_vx_u8mf4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -494,7 +445,6 @@ vuint8mf2_t test_vsll_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
   return vsll_vv_u8mf2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.i64.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -504,7 +454,6 @@ vuint8mf2_t test_vsll_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
   return vsll_vx_u8mf2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -514,7 +463,6 @@ vuint8m1_t test_vsll_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
   return vsll_vv_u8m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.i64.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -524,7 +472,6 @@ vuint8m1_t test_vsll_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
   return vsll_vx_u8m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -534,7 +481,6 @@ vuint8m2_t test_vsll_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
   return vsll_vv_u8m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.i64.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -544,7 +490,6 @@ vuint8m2_t test_vsll_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) {
   return vsll_vx_u8m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -554,7 +499,6 @@ vuint8m4_t test_vsll_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
   return vsll_vv_u8m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.i64.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -564,7 +508,6 @@ vuint8m4_t test_vsll_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) {
   return vsll_vx_u8m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -574,7 +517,6 @@ vuint8m8_t test_vsll_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
   return vsll_vv_u8m8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.i64.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -584,7 +526,6 @@ vuint8m8_t test_vsll_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) {
   return vsll_vx_u8m8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -594,7 +535,6 @@ vuint16mf4_t test_vsll_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl
   return vsll_vv_u16mf4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -604,7 +544,6 @@ vuint16mf4_t test_vsll_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) {
   return vsll_vx_u16mf4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -614,7 +553,6 @@ vuint16mf2_t test_vsll_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl
   return vsll_vv_u16mf2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -624,7 +562,6 @@ vuint16mf2_t test_vsll_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) {
   return vsll_vx_u16mf2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -634,7 +571,6 @@ vuint16m1_t test_vsll_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
   return vsll_vv_u16m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -644,7 +580,6 @@ vuint16m1_t test_vsll_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) {
   return vsll_vx_u16m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -654,7 +589,6 @@ vuint16m2_t test_vsll_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
   return vsll_vv_u16m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -664,7 +598,6 @@ vuint16m2_t test_vsll_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) {
   return vsll_vx_u16m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -674,7 +607,6 @@ vuint16m4_t test_vsll_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
   return vsll_vv_u16m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -684,7 +616,6 @@ vuint16m4_t test_vsll_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) {
   return vsll_vx_u16m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -694,7 +625,6 @@ vuint16m8_t test_vsll_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
   return vsll_vv_u16m8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -704,7 +634,6 @@ vuint16m8_t test_vsll_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) {
   return vsll_vx_u16m8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -714,7 +643,6 @@ vuint32mf2_t test_vsll_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl
   return vsll_vv_u32mf2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -724,7 +652,6 @@ vuint32mf2_t test_vsll_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) {
   return vsll_vx_u32mf2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -734,7 +661,6 @@ vuint32m1_t test_vsll_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
   return vsll_vv_u32m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -744,7 +670,6 @@ vuint32m1_t test_vsll_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
   return vsll_vx_u32m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -754,7 +679,6 @@ vuint32m2_t test_vsll_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
   return vsll_vv_u32m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -764,7 +688,6 @@ vuint32m2_t test_vsll_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) {
   return vsll_vx_u32m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -774,7 +697,6 @@ vuint32m4_t test_vsll_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
   return vsll_vv_u32m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -784,7 +706,6 @@ vuint32m4_t test_vsll_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) {
   return vsll_vx_u32m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -794,7 +715,6 @@ vuint32m8_t test_vsll_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
   return vsll_vv_u32m8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -804,7 +724,6 @@ vuint32m8_t test_vsll_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) {
   return vsll_vx_u32m8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -814,7 +733,6 @@ vuint64m1_t test_vsll_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
   return vsll_vv_u64m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -824,7 +742,6 @@ vuint64m1_t test_vsll_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
   return vsll_vx_u64m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -834,7 +751,6 @@ vuint64m2_t test_vsll_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
   return vsll_vv_u64m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -844,7 +760,6 @@ vuint64m2_t test_vsll_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
   return vsll_vx_u64m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -854,7 +769,6 @@ vuint64m4_t test_vsll_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
   return vsll_vv_u64m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -864,7 +778,6 @@ vuint64m4_t test_vsll_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
   return vsll_vx_u64m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -874,7 +787,6 @@ vuint64m8_t test_vsll_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
   return vsll_vv_u64m8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -884,7 +796,6 @@ vuint64m8_t test_vsll_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) {
   return vsll_vx_u64m8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -894,7 +805,6 @@ vint8mf8_t test_vsll_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t
   return vsll_vv_i8mf8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -904,7 +814,6 @@ vint8mf8_t test_vsll_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t
   return vsll_vx_i8mf8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -914,7 +823,6 @@ vint8mf4_t test_vsll_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t
   return vsll_vv_i8mf4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -924,7 +832,6 @@ vint8mf4_t test_vsll_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t
   return vsll_vx_i8mf4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -934,7 +841,6 @@ vint8mf2_t test_vsll_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t
   return vsll_vv_i8mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -944,7 +850,6 @@ vint8mf2_t test_vsll_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t
   return vsll_vx_i8mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -954,7 +859,6 @@ vint8m1_t test_vsll_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1,
   return vsll_vv_i8m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -964,7 +868,6 @@ vint8m1_t test_vsll_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1,
   return vsll_vx_i8m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -974,7 +877,6 @@ vint8m2_t test_vsll_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1,
   return vsll_vv_i8m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -984,7 +886,6 @@ vint8m2_t test_vsll_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1,
   return vsll_vx_i8m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -994,7 +895,6 @@ vint8m4_t test_vsll_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1,
   return vsll_vv_i8m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1004,7 +904,6 @@ vint8m4_t test_vsll_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1,
   return vsll_vx_i8m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1014,7 +913,6 @@ vint8m8_t test_vsll_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1,
   return vsll_vv_i8m8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1024,7 +922,6 @@ vint8m8_t test_vsll_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1,
   return vsll_vx_i8m8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1034,7 +931,6 @@ vint16mf4_t test_vsll_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16m
   return vsll_vv_i16mf4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1044,7 +940,6 @@ vint16mf4_t test_vsll_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16m
   return vsll_vx_i16mf4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1054,7 +949,6 @@ vint16mf2_t test_vsll_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16m
   return vsll_vv_i16mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1064,7 +958,6 @@ vint16mf2_t test_vsll_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16m
   return vsll_vx_i16mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1074,7 +967,6 @@ vint16m1_t test_vsll_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t
   return vsll_vv_i16m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1084,7 +976,6 @@ vint16m1_t test_vsll_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t
   return vsll_vx_i16m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1094,7 +985,6 @@ vint16m2_t test_vsll_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
   return vsll_vv_i16m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1104,7 +994,6 @@ vint16m2_t test_vsll_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
   return vsll_vx_i16m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1114,7 +1003,6 @@ vint16m4_t test_vsll_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
   return vsll_vv_i16m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1124,7 +1012,6 @@ vint16m4_t test_vsll_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
   return vsll_vx_i16m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1134,7 +1021,6 @@ vint16m8_t test_vsll_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
   return vsll_vv_i16m8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1144,7 +1030,6 @@ vint16m8_t test_vsll_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
   return vsll_vx_i16m8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1154,7 +1039,6 @@ vint32mf2_t test_vsll_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32m
   return vsll_vv_i32mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1164,7 +1048,6 @@ vint32mf2_t test_vsll_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32m
   return vsll_vx_i32mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1174,7 +1057,6 @@ vint32m1_t test_vsll_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t
   return vsll_vv_i32m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1184,7 +1066,6 @@ vint32m1_t test_vsll_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t
   return vsll_vx_i32m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1194,7 +1075,6 @@ vint32m2_t test_vsll_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t
   return vsll_vv_i32m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1204,7 +1084,6 @@ vint32m2_t test_vsll_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t
   return vsll_vx_i32m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1214,7 +1093,6 @@ vint32m4_t test_vsll_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
   return vsll_vv_i32m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1224,7 +1102,6 @@ vint32m4_t test_vsll_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
   return vsll_vx_i32m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1234,7 +1111,6 @@ vint32m8_t test_vsll_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
   return vsll_vv_i32m8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1244,7 +1120,6 @@ vint32m8_t test_vsll_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
   return vsll_vx_i32m8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1254,7 +1129,6 @@ vint64m1_t test_vsll_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t
   return vsll_vv_i64m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1264,7 +1138,6 @@ vint64m1_t test_vsll_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t
   return vsll_vx_i64m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1274,7 +1147,6 @@ vint64m2_t test_vsll_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t
   return vsll_vv_i64m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1284,7 +1156,6 @@ vint64m2_t test_vsll_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t
   return vsll_vx_i64m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1294,7 +1165,6 @@ vint64m4_t test_vsll_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t
   return vsll_vv_i64m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1304,7 +1174,6 @@ vint64m4_t test_vsll_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t
   return vsll_vx_i64m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1314,7 +1183,6 @@ vint64m8_t test_vsll_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
   return vsll_vv_i64m8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1324,7 +1192,6 @@ vint64m8_t test_vsll_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
   return vsll_vx_i64m8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1334,7 +1201,6 @@ vuint8mf8_t test_vsll_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf
   return vsll_vv_u8mf8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1344,7 +1210,6 @@ vuint8mf8_t test_vsll_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf
   return vsll_vx_u8mf8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1354,7 +1219,6 @@ vuint8mf4_t test_vsll_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf
   return vsll_vv_u8mf4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1364,7 +1228,6 @@ vuint8mf4_t test_vsll_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf
   return vsll_vx_u8mf4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1374,7 +1237,6 @@ vuint8mf2_t test_vsll_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf
   return vsll_vv_u8mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1384,7 +1246,6 @@ vuint8mf2_t test_vsll_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf
   return vsll_vx_u8mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1394,7 +1255,6 @@ vuint8m1_t test_vsll_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t o
   return vsll_vv_u8m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1404,7 +1264,6 @@ vuint8m1_t test_vsll_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t o
   return vsll_vx_u8m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1414,7 +1273,6 @@ vuint8m2_t test_vsll_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t o
   return vsll_vv_u8m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1424,7 +1282,6 @@ vuint8m2_t test_vsll_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t o
   return vsll_vx_u8m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1434,7 +1291,6 @@ vuint8m4_t test_vsll_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t o
   return vsll_vv_u8m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1444,7 +1300,6 @@ vuint8m4_t test_vsll_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t o
   return vsll_vx_u8m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1454,7 +1309,6 @@ vuint8m8_t test_vsll_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t o
   return vsll_vv_u8m8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1464,7 +1318,6 @@ vuint8m8_t test_vsll_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t o
   return vsll_vx_u8m8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1474,7 +1327,6 @@ vuint16mf4_t test_vsll_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint
   return vsll_vv_u16mf4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1484,7 +1336,6 @@ vuint16mf4_t test_vsll_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint
   return vsll_vx_u16mf4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1494,7 +1345,6 @@ vuint16mf2_t test_vsll_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint
   return vsll_vv_u16mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1504,7 +1354,6 @@ vuint16mf2_t test_vsll_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint
   return vsll_vx_u16mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1514,7 +1363,6 @@ vuint16m1_t test_vsll_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m
   return vsll_vv_u16m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1524,7 +1372,6 @@ vuint16m1_t test_vsll_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m
   return vsll_vx_u16m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1534,7 +1381,6 @@ vuint16m2_t test_vsll_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2
   return vsll_vv_u16m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1544,7 +1390,6 @@ vuint16m2_t test_vsll_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2
   return vsll_vx_u16m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1554,7 +1399,6 @@ vuint16m4_t test_vsll_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4
   return vsll_vv_u16m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1564,7 +1408,6 @@ vuint16m4_t test_vsll_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4
   return vsll_vx_u16m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1574,7 +1417,6 @@ vuint16m8_t test_vsll_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8
   return vsll_vv_u16m8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1584,7 +1426,6 @@ vuint16m8_t test_vsll_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8
   return vsll_vx_u16m8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1594,7 +1435,6 @@ vuint32mf2_t test_vsll_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint
   return vsll_vv_u32mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1604,7 +1444,6 @@ vuint32mf2_t test_vsll_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint
   return vsll_vx_u32mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1614,7 +1453,6 @@ vuint32m1_t test_vsll_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m
   return vsll_vv_u32m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1624,7 +1462,6 @@ vuint32m1_t test_vsll_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m
   return vsll_vx_u32m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1634,7 +1471,6 @@ vuint32m2_t test_vsll_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m
   return vsll_vv_u32m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1644,7 +1480,6 @@ vuint32m2_t test_vsll_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m
   return vsll_vx_u32m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1654,7 +1489,6 @@ vuint32m4_t test_vsll_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4
   return vsll_vv_u32m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1664,7 +1498,6 @@ vuint32m4_t test_vsll_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4
   return vsll_vx_u32m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1674,7 +1507,6 @@ vuint32m8_t test_vsll_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8
   return vsll_vv_u32m8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1684,7 +1516,6 @@ vuint32m8_t test_vsll_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8
   return vsll_vx_u32m8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1694,7 +1525,6 @@ vuint64m1_t test_vsll_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m
   return vsll_vv_u64m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1704,7 +1534,6 @@ vuint64m1_t test_vsll_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m
   return vsll_vx_u64m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1714,7 +1543,6 @@ vuint64m2_t test_vsll_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m
   return vsll_vv_u64m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1724,7 +1552,6 @@ vuint64m2_t test_vsll_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m
   return vsll_vx_u64m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1734,7 +1561,6 @@ vuint64m4_t test_vsll_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m
   return vsll_vv_u64m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1744,7 +1570,6 @@ vuint64m4_t test_vsll_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m
   return vsll_vx_u64m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1754,7 +1579,6 @@ vuint64m8_t test_vsll_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8
   return vsll_vv_u64m8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul.c
index 0862d003fb557..33480db7bfe89 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint8mf8_t test_vsmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
   return vsmul_vv_i8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint8mf8_t test_vsmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
   return vsmul_vx_i8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint8mf4_t test_vsmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
   return vsmul_vv_i8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint8mf4_t test_vsmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
   return vsmul_vx_i8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint8mf2_t test_vsmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
   return vsmul_vv_i8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint8mf2_t test_vsmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
   return vsmul_vx_i8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vint8m1_t test_vsmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
   return vsmul_vv_i8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vint8m1_t test_vsmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
   return vsmul_vx_i8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vint8m2_t test_vsmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
   return vsmul_vv_i8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vint8m2_t test_vsmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
   return vsmul_vx_i8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vint8m4_t test_vsmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
   return vsmul_vv_i8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vint8m4_t test_vsmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
   return vsmul_vx_i8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vint8m8_t test_vsmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
   return vsmul_vv_i8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vint8m8_t test_vsmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
   return vsmul_vx_i8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vint16mf4_t test_vsmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
   return vsmul_vv_i16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vint16mf4_t test_vsmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
   return vsmul_vx_i16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vint16mf2_t test_vsmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
   return vsmul_vv_i16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vint16mf2_t test_vsmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
   return vsmul_vx_i16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vint16m1_t test_vsmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
   return vsmul_vv_i16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vint16m1_t test_vsmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
   return vsmul_vx_i16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vint16m2_t test_vsmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
   return vsmul_vv_i16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vint16m2_t test_vsmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
   return vsmul_vx_i16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vint16m4_t test_vsmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
   return vsmul_vv_i16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vint16m4_t test_vsmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
   return vsmul_vx_i16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vint16m8_t test_vsmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
   return vsmul_vv_i16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vint16m8_t test_vsmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
   return vsmul_vx_i16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vint32mf2_t test_vsmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
   return vsmul_vv_i32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vint32mf2_t test_vsmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
   return vsmul_vx_i32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vint32m1_t test_vsmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
   return vsmul_vv_i32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -304,7 +274,6 @@ vint32m1_t test_vsmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
   return vsmul_vx_i32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -314,7 +283,6 @@ vint32m2_t test_vsmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
   return vsmul_vv_i32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -324,7 +292,6 @@ vint32m2_t test_vsmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
   return vsmul_vx_i32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -334,7 +301,6 @@ vint32m4_t test_vsmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
   return vsmul_vv_i32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -344,7 +310,6 @@ vint32m4_t test_vsmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
   return vsmul_vx_i32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -354,7 +319,6 @@ vint32m8_t test_vsmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
   return vsmul_vv_i32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -364,7 +328,6 @@ vint32m8_t test_vsmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
   return vsmul_vx_i32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -374,7 +337,6 @@ vint64m1_t test_vsmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
   return vsmul_vv_i64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -384,7 +346,6 @@ vint64m1_t test_vsmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
   return vsmul_vx_i64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -394,7 +355,6 @@ vint64m2_t test_vsmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
   return vsmul_vv_i64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -404,7 +364,6 @@ vint64m2_t test_vsmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
   return vsmul_vx_i64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -414,7 +373,6 @@ vint64m4_t test_vsmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
   return vsmul_vv_i64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -424,7 +382,6 @@ vint64m4_t test_vsmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
   return vsmul_vx_i64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -434,7 +391,6 @@ vint64m8_t test_vsmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
   return vsmul_vv_i64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -444,7 +400,6 @@ vint64m8_t test_vsmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
   return vsmul_vx_i64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -455,7 +410,6 @@ vint8mf8_t test_vsmul_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
   return vsmul_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -466,7 +420,6 @@ vint8mf8_t test_vsmul_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
   return vsmul_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -477,7 +430,6 @@ vint8mf4_t test_vsmul_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
   return vsmul_vv_i8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -488,7 +440,6 @@ vint8mf4_t test_vsmul_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
   return vsmul_vx_i8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -499,7 +450,6 @@ vint8mf2_t test_vsmul_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
   return vsmul_vv_i8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -510,7 +460,6 @@ vint8mf2_t test_vsmul_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
   return vsmul_vx_i8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -521,7 +470,6 @@ vint8m1_t test_vsmul_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
   return vsmul_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -532,7 +480,6 @@ vint8m1_t test_vsmul_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
   return vsmul_vx_i8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -543,7 +490,6 @@ vint8m2_t test_vsmul_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
   return vsmul_vv_i8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -554,7 +500,6 @@ vint8m2_t test_vsmul_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
   return vsmul_vx_i8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -565,7 +510,6 @@ vint8m4_t test_vsmul_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
   return vsmul_vv_i8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -576,7 +520,6 @@ vint8m4_t test_vsmul_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
   return vsmul_vx_i8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -587,7 +530,6 @@ vint8m8_t test_vsmul_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
   return vsmul_vv_i8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -598,7 +540,6 @@ vint8m8_t test_vsmul_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
   return vsmul_vx_i8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -610,7 +551,6 @@ vint16mf4_t test_vsmul_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
   return vsmul_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -621,7 +561,6 @@ vint16mf4_t test_vsmul_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
   return vsmul_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -633,7 +572,6 @@ vint16mf2_t test_vsmul_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
   return vsmul_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -644,7 +582,6 @@ vint16mf2_t test_vsmul_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
   return vsmul_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -655,7 +592,6 @@ vint16m1_t test_vsmul_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
   return vsmul_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -666,7 +602,6 @@ vint16m1_t test_vsmul_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
   return vsmul_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -677,7 +612,6 @@ vint16m2_t test_vsmul_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
   return vsmul_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -688,7 +622,6 @@ vint16m2_t test_vsmul_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
   return vsmul_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -699,7 +632,6 @@ vint16m4_t test_vsmul_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
   return vsmul_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -710,7 +642,6 @@ vint16m4_t test_vsmul_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
   return vsmul_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -721,7 +652,6 @@ vint16m8_t test_vsmul_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
   return vsmul_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -732,7 +662,6 @@ vint16m8_t test_vsmul_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
   return vsmul_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -744,7 +673,6 @@ vint32mf2_t test_vsmul_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
   return vsmul_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -755,7 +683,6 @@ vint32mf2_t test_vsmul_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
   return vsmul_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -766,7 +693,6 @@ vint32m1_t test_vsmul_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
   return vsmul_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -777,7 +703,6 @@ vint32m1_t test_vsmul_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
   return vsmul_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -788,7 +713,6 @@ vint32m2_t test_vsmul_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
   return vsmul_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -799,7 +723,6 @@ vint32m2_t test_vsmul_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
   return vsmul_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -810,7 +733,6 @@ vint32m4_t test_vsmul_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
   return vsmul_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -821,7 +743,6 @@ vint32m4_t test_vsmul_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
   return vsmul_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -832,7 +753,6 @@ vint32m8_t test_vsmul_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
   return vsmul_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -843,7 +763,6 @@ vint32m8_t test_vsmul_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
   return vsmul_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -854,7 +773,6 @@ vint64m1_t test_vsmul_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
   return vsmul_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -865,7 +783,6 @@ vint64m1_t test_vsmul_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
   return vsmul_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -876,7 +793,6 @@ vint64m2_t test_vsmul_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
   return vsmul_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -887,7 +803,6 @@ vint64m2_t test_vsmul_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
   return vsmul_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -898,7 +813,6 @@ vint64m4_t test_vsmul_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
   return vsmul_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -909,7 +823,6 @@ vint64m4_t test_vsmul_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
   return vsmul_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -920,7 +833,6 @@ vint64m8_t test_vsmul_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
   return vsmul_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsoxei.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsoxei.c
index 8850d9828b3d3..46e8b231bfabc 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsoxei.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsoxei.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -17,7 +16,6 @@ void test_vsoxei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t value,
   return vsoxei8_v_i8mf8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -29,7 +27,6 @@ void test_vsoxei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t value,
   return vsoxei8_v_i8mf4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -41,7 +38,6 @@ void test_vsoxei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t value,
   return vsoxei8_v_i8mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -53,7 +49,6 @@ void test_vsoxei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t value,
   return vsoxei8_v_i8m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -65,7 +60,6 @@ void test_vsoxei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t value,
   return vsoxei8_v_i8m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
@@ -77,7 +71,6 @@ void test_vsoxei8_v_i8m4(int8_t *base, vuint8m4_t bindex, vint8m4_t value,
   return vsoxei8_v_i8m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
@@ -89,7 +82,6 @@ void test_vsoxei8_v_i8m8(int8_t *base, vuint8m8_t bindex, vint8m8_t value,
   return vsoxei8_v_i8m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -101,7 +93,6 @@ void test_vsoxei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t value,
   return vsoxei16_v_i8mf8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -113,7 +104,6 @@ void test_vsoxei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t value,
   return vsoxei16_v_i8mf4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -125,7 +115,6 @@ void test_vsoxei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t value,
   return vsoxei16_v_i8mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -137,7 +126,6 @@ void test_vsoxei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t value,
   return vsoxei16_v_i8m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -149,7 +137,6 @@ void test_vsoxei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t value,
   return vsoxei16_v_i8m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
@@ -161,7 +148,6 @@ void test_vsoxei16_v_i8m4(int8_t *base, vuint16m8_t bindex, vint8m4_t value,
   return vsoxei16_v_i8m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -173,7 +159,6 @@ void test_vsoxei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t value,
   return vsoxei32_v_i8mf8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -185,7 +170,6 @@ void test_vsoxei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t value,
   return vsoxei32_v_i8mf4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -197,7 +181,6 @@ void test_vsoxei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t value,
   return vsoxei32_v_i8mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -209,7 +192,6 @@ void test_vsoxei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t value,
   return vsoxei32_v_i8m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -221,7 +203,6 @@ void test_vsoxei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t value,
   return vsoxei32_v_i8m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -233,7 +214,6 @@ void test_vsoxei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t value,
   return vsoxei64_v_i8mf8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -245,7 +225,6 @@ void test_vsoxei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t value,
   return vsoxei64_v_i8mf4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -257,7 +236,6 @@ void test_vsoxei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t value,
   return vsoxei64_v_i8mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -269,7 +247,6 @@ void test_vsoxei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t value,
   return vsoxei64_v_i8m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -281,7 +258,6 @@ void test_vsoxei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t value,
   return vsoxei8_v_i16mf4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -293,7 +269,6 @@ void test_vsoxei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t value,
   return vsoxei8_v_i16mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -305,7 +280,6 @@ void test_vsoxei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t value,
   return vsoxei8_v_i16m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -317,7 +291,6 @@ void test_vsoxei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t value,
   return vsoxei8_v_i16m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -329,7 +302,6 @@ void test_vsoxei8_v_i16m4(int16_t *base, vuint8m2_t bindex, vint16m4_t value,
   return vsoxei8_v_i16m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
@@ -341,7 +313,6 @@ void test_vsoxei8_v_i16m8(int16_t *base, vuint8m4_t bindex, vint16m8_t value,
   return vsoxei8_v_i16m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -353,7 +324,6 @@ void test_vsoxei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex,
   return vsoxei16_v_i16mf4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -365,7 +335,6 @@ void test_vsoxei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex,
   return vsoxei16_v_i16mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -377,7 +346,6 @@ void test_vsoxei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t value,
   return vsoxei16_v_i16m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -389,7 +357,6 @@ void test_vsoxei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t value,
   return vsoxei16_v_i16m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -401,7 +368,6 @@ void test_vsoxei16_v_i16m4(int16_t *base, vuint16m4_t bindex, vint16m4_t value,
   return vsoxei16_v_i16m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
@@ -413,7 +379,6 @@ void test_vsoxei16_v_i16m8(int16_t *base, vuint16m8_t bindex, vint16m8_t value,
   return vsoxei16_v_i16m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -425,7 +390,6 @@ void test_vsoxei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex,
   return vsoxei32_v_i16mf4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -437,7 +401,6 @@ void test_vsoxei32_v_i16mf2(int16_t *base, vuint32m1_t bindex,
   return vsoxei32_v_i16mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -449,7 +412,6 @@ void test_vsoxei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t value,
   return vsoxei32_v_i16m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -461,7 +423,6 @@ void test_vsoxei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t value,
   return vsoxei32_v_i16m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -473,7 +434,6 @@ void test_vsoxei32_v_i16m4(int16_t *base, vuint32m8_t bindex, vint16m4_t value,
   return vsoxei32_v_i16m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -485,7 +445,6 @@ void test_vsoxei64_v_i16mf4(int16_t *base, vuint64m1_t bindex,
   return vsoxei64_v_i16mf4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -497,7 +456,6 @@ void test_vsoxei64_v_i16mf2(int16_t *base, vuint64m2_t bindex,
   return vsoxei64_v_i16mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -509,7 +467,6 @@ void test_vsoxei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t value,
   return vsoxei64_v_i16m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -521,7 +478,6 @@ void test_vsoxei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t value,
   return vsoxei64_v_i16m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -533,7 +489,6 @@ void test_vsoxei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t value,
   return vsoxei8_v_i32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -545,7 +500,6 @@ void test_vsoxei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t value,
   return vsoxei8_v_i32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -557,7 +511,6 @@ void test_vsoxei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t value,
   return vsoxei8_v_i32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -569,7 +522,6 @@ void test_vsoxei8_v_i32m4(int32_t *base, vuint8m1_t bindex, vint32m4_t value,
   return vsoxei8_v_i32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -581,7 +533,6 @@ void test_vsoxei8_v_i32m8(int32_t *base, vuint8m2_t bindex, vint32m8_t value,
   return vsoxei8_v_i32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -593,7 +544,6 @@ void test_vsoxei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex,
   return vsoxei16_v_i32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -605,7 +555,6 @@ void test_vsoxei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t value,
   return vsoxei16_v_i32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -617,7 +566,6 @@ void test_vsoxei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t value,
   return vsoxei16_v_i32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -629,7 +577,6 @@ void test_vsoxei16_v_i32m4(int32_t *base, vuint16m2_t bindex, vint32m4_t value,
   return vsoxei16_v_i32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -641,7 +588,6 @@ void test_vsoxei16_v_i32m8(int32_t *base, vuint16m4_t bindex, vint32m8_t value,
   return vsoxei16_v_i32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -653,7 +599,6 @@ void test_vsoxei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex,
   return vsoxei32_v_i32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -665,7 +610,6 @@ void test_vsoxei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t value,
   return vsoxei32_v_i32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -677,7 +621,6 @@ void test_vsoxei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t value,
   return vsoxei32_v_i32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -689,7 +632,6 @@ void test_vsoxei32_v_i32m4(int32_t *base, vuint32m4_t bindex, vint32m4_t value,
   return vsoxei32_v_i32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -701,7 +643,6 @@ void test_vsoxei32_v_i32m8(int32_t *base, vuint32m8_t bindex, vint32m8_t value,
   return vsoxei32_v_i32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -713,7 +654,6 @@ void test_vsoxei64_v_i32mf2(int32_t *base, vuint64m1_t bindex,
   return vsoxei64_v_i32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -725,7 +665,6 @@ void test_vsoxei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t value,
   return vsoxei64_v_i32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -737,7 +676,6 @@ void test_vsoxei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t value,
   return vsoxei64_v_i32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -749,7 +687,6 @@ void test_vsoxei64_v_i32m4(int32_t *base, vuint64m8_t bindex, vint32m4_t value,
   return vsoxei64_v_i32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -761,7 +698,6 @@ void test_vsoxei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t value,
   return vsoxei8_v_i64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -773,7 +709,6 @@ void test_vsoxei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t value,
   return vsoxei8_v_i64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -785,7 +720,6 @@ void test_vsoxei8_v_i64m4(int64_t *base, vuint8mf2_t bindex, vint64m4_t value,
   return vsoxei8_v_i64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -797,7 +731,6 @@ void test_vsoxei8_v_i64m8(int64_t *base, vuint8m1_t bindex, vint64m8_t value,
   return vsoxei8_v_i64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -809,7 +742,6 @@ void test_vsoxei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t value,
   return vsoxei16_v_i64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -821,7 +753,6 @@ void test_vsoxei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t value,
   return vsoxei16_v_i64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -833,7 +764,6 @@ void test_vsoxei16_v_i64m4(int64_t *base, vuint16m1_t bindex, vint64m4_t value,
   return vsoxei16_v_i64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -845,7 +775,6 @@ void test_vsoxei16_v_i64m8(int64_t *base, vuint16m2_t bindex, vint64m8_t value,
   return vsoxei16_v_i64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -857,7 +786,6 @@ void test_vsoxei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t value,
   return vsoxei32_v_i64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -869,7 +797,6 @@ void test_vsoxei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t value,
   return vsoxei32_v_i64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -881,7 +808,6 @@ void test_vsoxei32_v_i64m4(int64_t *base, vuint32m2_t bindex, vint64m4_t value,
   return vsoxei32_v_i64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -893,7 +819,6 @@ void test_vsoxei32_v_i64m8(int64_t *base, vuint32m4_t bindex, vint64m8_t value,
   return vsoxei32_v_i64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -905,7 +830,6 @@ void test_vsoxei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t value,
   return vsoxei64_v_i64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -917,7 +841,6 @@ void test_vsoxei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t value,
   return vsoxei64_v_i64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -929,7 +852,6 @@ void test_vsoxei64_v_i64m4(int64_t *base, vuint64m4_t bindex, vint64m4_t value,
   return vsoxei64_v_i64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -941,7 +863,6 @@ void test_vsoxei64_v_i64m8(int64_t *base, vuint64m8_t bindex, vint64m8_t value,
   return vsoxei64_v_i64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -953,7 +874,6 @@ void test_vsoxei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t value,
   return vsoxei8_v_u8mf8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -965,7 +885,6 @@ void test_vsoxei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t value,
   return vsoxei8_v_u8mf4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -977,7 +896,6 @@ void test_vsoxei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t value,
   return vsoxei8_v_u8mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -989,7 +907,6 @@ void test_vsoxei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t value,
   return vsoxei8_v_u8m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -1001,7 +918,6 @@ void test_vsoxei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t value,
   return vsoxei8_v_u8m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
@@ -1013,7 +929,6 @@ void test_vsoxei8_v_u8m4(uint8_t *base, vuint8m4_t bindex, vuint8m4_t value,
   return vsoxei8_v_u8m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
@@ -1025,7 +940,6 @@ void test_vsoxei8_v_u8m8(uint8_t *base, vuint8m8_t bindex, vuint8m8_t value,
   return vsoxei8_v_u8m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -1037,7 +951,6 @@ void test_vsoxei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex,
   return vsoxei16_v_u8mf8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -1049,7 +962,6 @@ void test_vsoxei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex,
   return vsoxei16_v_u8mf4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -1061,7 +973,6 @@ void test_vsoxei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t value,
   return vsoxei16_v_u8mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -1073,7 +984,6 @@ void test_vsoxei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t value,
   return vsoxei16_v_u8m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -1085,7 +995,6 @@ void test_vsoxei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t value,
   return vsoxei16_v_u8m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
@@ -1097,7 +1006,6 @@ void test_vsoxei16_v_u8m4(uint8_t *base, vuint16m8_t bindex, vuint8m4_t value,
   return vsoxei16_v_u8m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -1109,7 +1017,6 @@ void test_vsoxei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex,
   return vsoxei32_v_u8mf8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -1121,7 +1028,6 @@ void test_vsoxei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t value,
   return vsoxei32_v_u8mf4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -1133,7 +1039,6 @@ void test_vsoxei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t value,
   return vsoxei32_v_u8mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -1145,7 +1050,6 @@ void test_vsoxei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t value,
   return vsoxei32_v_u8m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -1157,7 +1061,6 @@ void test_vsoxei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t value,
   return vsoxei32_v_u8m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -1169,7 +1072,6 @@ void test_vsoxei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t value,
   return vsoxei64_v_u8mf8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -1181,7 +1083,6 @@ void test_vsoxei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t value,
   return vsoxei64_v_u8mf4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -1193,7 +1094,6 @@ void test_vsoxei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t value,
   return vsoxei64_v_u8mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -1205,7 +1105,6 @@ void test_vsoxei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t value,
   return vsoxei64_v_u8m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -1217,7 +1116,6 @@ void test_vsoxei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex,
   return vsoxei8_v_u16mf4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -1229,7 +1127,6 @@ void test_vsoxei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex,
   return vsoxei8_v_u16mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -1241,7 +1138,6 @@ void test_vsoxei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t value,
   return vsoxei8_v_u16m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -1253,7 +1149,6 @@ void test_vsoxei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t value,
   return vsoxei8_v_u16m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -1265,7 +1160,6 @@ void test_vsoxei8_v_u16m4(uint16_t *base, vuint8m2_t bindex, vuint16m4_t value,
   return vsoxei8_v_u16m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
@@ -1277,7 +1171,6 @@ void test_vsoxei8_v_u16m8(uint16_t *base, vuint8m4_t bindex, vuint16m8_t value,
   return vsoxei8_v_u16m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -1289,7 +1182,6 @@ void test_vsoxei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex,
   return vsoxei16_v_u16mf4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -1301,7 +1193,6 @@ void test_vsoxei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex,
   return vsoxei16_v_u16mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -1313,7 +1204,6 @@ void test_vsoxei16_v_u16m1(uint16_t *base, vuint16m1_t bindex,
   return vsoxei16_v_u16m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -1325,7 +1215,6 @@ void test_vsoxei16_v_u16m2(uint16_t *base, vuint16m2_t bindex,
   return vsoxei16_v_u16m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -1337,7 +1226,6 @@ void test_vsoxei16_v_u16m4(uint16_t *base, vuint16m4_t bindex,
   return vsoxei16_v_u16m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
@@ -1349,7 +1237,6 @@ void test_vsoxei16_v_u16m8(uint16_t *base, vuint16m8_t bindex,
   return vsoxei16_v_u16m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -1361,7 +1248,6 @@ void test_vsoxei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex,
   return vsoxei32_v_u16mf4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -1373,7 +1259,6 @@ void test_vsoxei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex,
   return vsoxei32_v_u16mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -1385,7 +1270,6 @@ void test_vsoxei32_v_u16m1(uint16_t *base, vuint32m2_t bindex,
   return vsoxei32_v_u16m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -1397,7 +1281,6 @@ void test_vsoxei32_v_u16m2(uint16_t *base, vuint32m4_t bindex,
   return vsoxei32_v_u16m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -1409,7 +1292,6 @@ void test_vsoxei32_v_u16m4(uint16_t *base, vuint32m8_t bindex,
   return vsoxei32_v_u16m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -1421,7 +1303,6 @@ void test_vsoxei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex,
   return vsoxei64_v_u16mf4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -1433,7 +1314,6 @@ void test_vsoxei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex,
   return vsoxei64_v_u16mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -1445,7 +1325,6 @@ void test_vsoxei64_v_u16m1(uint16_t *base, vuint64m4_t bindex,
   return vsoxei64_v_u16m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -1457,7 +1336,6 @@ void test_vsoxei64_v_u16m2(uint16_t *base, vuint64m8_t bindex,
   return vsoxei64_v_u16m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1469,7 +1347,6 @@ void test_vsoxei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex,
   return vsoxei8_v_u32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1481,7 +1358,6 @@ void test_vsoxei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value,
   return vsoxei8_v_u32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1493,7 +1369,6 @@ void test_vsoxei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value,
   return vsoxei8_v_u32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1505,7 +1380,6 @@ void test_vsoxei8_v_u32m4(uint32_t *base, vuint8m1_t bindex, vuint32m4_t value,
   return vsoxei8_v_u32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1517,7 +1391,6 @@ void test_vsoxei8_v_u32m8(uint32_t *base, vuint8m2_t bindex, vuint32m8_t value,
   return vsoxei8_v_u32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1529,7 +1402,6 @@ void test_vsoxei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex,
   return vsoxei16_v_u32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1541,7 +1413,6 @@ void test_vsoxei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex,
   return vsoxei16_v_u32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1553,7 +1424,6 @@ void test_vsoxei16_v_u32m2(uint32_t *base, vuint16m1_t bindex,
   return vsoxei16_v_u32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1565,7 +1435,6 @@ void test_vsoxei16_v_u32m4(uint32_t *base, vuint16m2_t bindex,
   return vsoxei16_v_u32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1577,7 +1446,6 @@ void test_vsoxei16_v_u32m8(uint32_t *base, vuint16m4_t bindex,
   return vsoxei16_v_u32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1589,7 +1457,6 @@ void test_vsoxei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex,
   return vsoxei32_v_u32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1601,7 +1468,6 @@ void test_vsoxei32_v_u32m1(uint32_t *base, vuint32m1_t bindex,
   return vsoxei32_v_u32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1613,7 +1479,6 @@ void test_vsoxei32_v_u32m2(uint32_t *base, vuint32m2_t bindex,
   return vsoxei32_v_u32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1625,7 +1490,6 @@ void test_vsoxei32_v_u32m4(uint32_t *base, vuint32m4_t bindex,
   return vsoxei32_v_u32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1637,7 +1501,6 @@ void test_vsoxei32_v_u32m8(uint32_t *base, vuint32m8_t bindex,
   return vsoxei32_v_u32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1649,7 +1512,6 @@ void test_vsoxei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex,
   return vsoxei64_v_u32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1661,7 +1523,6 @@ void test_vsoxei64_v_u32m1(uint32_t *base, vuint64m2_t bindex,
   return vsoxei64_v_u32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1673,7 +1534,6 @@ void test_vsoxei64_v_u32m2(uint32_t *base, vuint64m4_t bindex,
   return vsoxei64_v_u32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1685,7 +1545,6 @@ void test_vsoxei64_v_u32m4(uint32_t *base, vuint64m8_t bindex,
   return vsoxei64_v_u32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1697,7 +1556,6 @@ void test_vsoxei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value,
   return vsoxei8_v_u64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1709,7 +1567,6 @@ void test_vsoxei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value,
   return vsoxei8_v_u64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1721,7 +1578,6 @@ void test_vsoxei8_v_u64m4(uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value,
   return vsoxei8_v_u64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1733,7 +1589,6 @@ void test_vsoxei8_v_u64m8(uint64_t *base, vuint8m1_t bindex, vuint64m8_t value,
   return vsoxei8_v_u64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1745,7 +1600,6 @@ void test_vsoxei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex,
   return vsoxei16_v_u64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1757,7 +1611,6 @@ void test_vsoxei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex,
   return vsoxei16_v_u64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1769,7 +1622,6 @@ void test_vsoxei16_v_u64m4(uint64_t *base, vuint16m1_t bindex,
   return vsoxei16_v_u64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1781,7 +1633,6 @@ void test_vsoxei16_v_u64m8(uint64_t *base, vuint16m2_t bindex,
   return vsoxei16_v_u64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1793,7 +1644,6 @@ void test_vsoxei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex,
   return vsoxei32_v_u64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1805,7 +1655,6 @@ void test_vsoxei32_v_u64m2(uint64_t *base, vuint32m1_t bindex,
   return vsoxei32_v_u64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1817,7 +1666,6 @@ void test_vsoxei32_v_u64m4(uint64_t *base, vuint32m2_t bindex,
   return vsoxei32_v_u64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1829,7 +1677,6 @@ void test_vsoxei32_v_u64m8(uint64_t *base, vuint32m4_t bindex,
   return vsoxei32_v_u64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1841,7 +1688,6 @@ void test_vsoxei64_v_u64m1(uint64_t *base, vuint64m1_t bindex,
   return vsoxei64_v_u64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1853,7 +1699,6 @@ void test_vsoxei64_v_u64m2(uint64_t *base, vuint64m2_t bindex,
   return vsoxei64_v_u64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1865,7 +1710,6 @@ void test_vsoxei64_v_u64m4(uint64_t *base, vuint64m4_t bindex,
   return vsoxei64_v_u64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1877,7 +1721,6 @@ void test_vsoxei64_v_u64m8(uint64_t *base, vuint64m8_t bindex,
   return vsoxei64_v_u64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -1889,7 +1732,6 @@ void test_vsoxei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t value,
   return vsoxei8_v_f32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -1901,7 +1743,6 @@ void test_vsoxei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t value,
   return vsoxei8_v_f32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -1913,7 +1754,6 @@ void test_vsoxei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t value,
   return vsoxei8_v_f32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -1925,7 +1765,6 @@ void test_vsoxei8_v_f32m4(float *base, vuint8m1_t bindex, vfloat32m4_t value,
   return vsoxei8_v_f32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
@@ -1937,7 +1776,6 @@ void test_vsoxei8_v_f32m8(float *base, vuint8m2_t bindex, vfloat32m8_t value,
   return vsoxei8_v_f32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -1949,7 +1787,6 @@ void test_vsoxei16_v_f32mf2(float *base, vuint16mf4_t bindex,
   return vsoxei16_v_f32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -1961,7 +1798,6 @@ void test_vsoxei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t value,
   return vsoxei16_v_f32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -1973,7 +1809,6 @@ void test_vsoxei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t value,
   return vsoxei16_v_f32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -1985,7 +1820,6 @@ void test_vsoxei16_v_f32m4(float *base, vuint16m2_t bindex, vfloat32m4_t value,
   return vsoxei16_v_f32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
@@ -1997,7 +1831,6 @@ void test_vsoxei16_v_f32m8(float *base, vuint16m4_t bindex, vfloat32m8_t value,
   return vsoxei16_v_f32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -2009,7 +1842,6 @@ void test_vsoxei32_v_f32mf2(float *base, vuint32mf2_t bindex,
   return vsoxei32_v_f32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -2021,7 +1853,6 @@ void test_vsoxei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t value,
   return vsoxei32_v_f32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -2033,7 +1864,6 @@ void test_vsoxei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t value,
   return vsoxei32_v_f32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -2045,7 +1875,6 @@ void test_vsoxei32_v_f32m4(float *base, vuint32m4_t bindex, vfloat32m4_t value,
   return vsoxei32_v_f32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
@@ -2057,7 +1886,6 @@ void test_vsoxei32_v_f32m8(float *base, vuint32m8_t bindex, vfloat32m8_t value,
   return vsoxei32_v_f32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -2069,7 +1897,6 @@ void test_vsoxei64_v_f32mf2(float *base, vuint64m1_t bindex,
   return vsoxei64_v_f32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -2081,7 +1908,6 @@ void test_vsoxei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t value,
   return vsoxei64_v_f32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -2093,7 +1919,6 @@ void test_vsoxei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t value,
   return vsoxei64_v_f32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -2105,7 +1930,6 @@ void test_vsoxei64_v_f32m4(float *base, vuint64m8_t bindex, vfloat32m4_t value,
   return vsoxei64_v_f32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -2117,7 +1941,6 @@ void test_vsoxei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t value,
   return vsoxei8_v_f64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -2129,7 +1952,6 @@ void test_vsoxei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t value,
   return vsoxei8_v_f64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -2141,7 +1963,6 @@ void test_vsoxei8_v_f64m4(double *base, vuint8mf2_t bindex, vfloat64m4_t value,
   return vsoxei8_v_f64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
@@ -2153,7 +1974,6 @@ void test_vsoxei8_v_f64m8(double *base, vuint8m1_t bindex, vfloat64m8_t value,
   return vsoxei8_v_f64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -2165,7 +1985,6 @@ void test_vsoxei16_v_f64m1(double *base, vuint16mf4_t bindex,
   return vsoxei16_v_f64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -2177,7 +1996,6 @@ void test_vsoxei16_v_f64m2(double *base, vuint16mf2_t bindex,
   return vsoxei16_v_f64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -2189,7 +2007,6 @@ void test_vsoxei16_v_f64m4(double *base, vuint16m1_t bindex, vfloat64m4_t value,
   return vsoxei16_v_f64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
@@ -2201,7 +2018,6 @@ void test_vsoxei16_v_f64m8(double *base, vuint16m2_t bindex, vfloat64m8_t value,
   return vsoxei16_v_f64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -2213,7 +2029,6 @@ void test_vsoxei32_v_f64m1(double *base, vuint32mf2_t bindex,
   return vsoxei32_v_f64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -2225,7 +2040,6 @@ void test_vsoxei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t value,
   return vsoxei32_v_f64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -2237,7 +2051,6 @@ void test_vsoxei32_v_f64m4(double *base, vuint32m2_t bindex, vfloat64m4_t value,
   return vsoxei32_v_f64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
@@ -2249,7 +2062,6 @@ void test_vsoxei32_v_f64m8(double *base, vuint32m4_t bindex, vfloat64m8_t value,
   return vsoxei32_v_f64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -2261,7 +2073,6 @@ void test_vsoxei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t value,
   return vsoxei64_v_f64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -2273,7 +2084,6 @@ void test_vsoxei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t value,
   return vsoxei64_v_f64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -2285,7 +2095,6 @@ void test_vsoxei64_v_f64m4(double *base, vuint64m4_t bindex, vfloat64m4_t value,
   return vsoxei64_v_f64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
@@ -2297,7 +2106,6 @@ void test_vsoxei64_v_f64m8(double *base, vuint64m8_t bindex, vfloat64m8_t value,
   return vsoxei64_v_f64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -2309,7 +2117,6 @@ void test_vsoxei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex,
   return vsoxei8_v_i8mf8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -2321,7 +2128,6 @@ void test_vsoxei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex,
   return vsoxei8_v_i8mf4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -2333,7 +2139,6 @@ void test_vsoxei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex,
   return vsoxei8_v_i8mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -2345,7 +2150,6 @@ void test_vsoxei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex,
   return vsoxei8_v_i8m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -2357,7 +2161,6 @@ void test_vsoxei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex,
   return vsoxei8_v_i8m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
@@ -2369,7 +2172,6 @@ void test_vsoxei8_v_i8m4_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex,
   return vsoxei8_v_i8m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
@@ -2381,7 +2183,6 @@ void test_vsoxei8_v_i8m8_m(vbool1_t mask, int8_t *base, vuint8m8_t bindex,
   return vsoxei8_v_i8m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -2393,7 +2194,6 @@ void test_vsoxei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex,
   return vsoxei16_v_i8mf8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -2405,7 +2205,6 @@ void test_vsoxei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex,
   return vsoxei16_v_i8mf4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -2417,7 +2216,6 @@ void test_vsoxei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex,
   return vsoxei16_v_i8mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -2429,7 +2227,6 @@ void test_vsoxei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex,
   return vsoxei16_v_i8m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -2441,7 +2238,6 @@ void test_vsoxei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex,
   return vsoxei16_v_i8m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
@@ -2453,7 +2249,6 @@ void test_vsoxei16_v_i8m4_m(vbool2_t mask, int8_t *base, vuint16m8_t bindex,
   return vsoxei16_v_i8m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -2465,7 +2260,6 @@ void test_vsoxei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex,
   return vsoxei32_v_i8mf8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -2477,7 +2271,6 @@ void test_vsoxei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex,
   return vsoxei32_v_i8mf4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -2489,7 +2282,6 @@ void test_vsoxei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex,
   return vsoxei32_v_i8mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -2501,7 +2293,6 @@ void test_vsoxei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex,
   return vsoxei32_v_i8m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -2513,7 +2304,6 @@ void test_vsoxei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex,
   return vsoxei32_v_i8m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -2525,7 +2315,6 @@ void test_vsoxei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex,
   return vsoxei64_v_i8mf8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -2537,7 +2326,6 @@ void test_vsoxei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex,
   return vsoxei64_v_i8mf4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -2549,7 +2337,6 @@ void test_vsoxei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex,
   return vsoxei64_v_i8mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -2561,7 +2348,6 @@ void test_vsoxei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex,
   return vsoxei64_v_i8m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -2573,7 +2359,6 @@ void test_vsoxei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex,
   return vsoxei8_v_i16mf4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -2585,7 +2370,6 @@ void test_vsoxei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex,
   return vsoxei8_v_i16mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -2597,7 +2381,6 @@ void test_vsoxei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex,
   return vsoxei8_v_i16m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -2609,7 +2392,6 @@ void test_vsoxei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex,
   return vsoxei8_v_i16m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -2621,7 +2403,6 @@ void test_vsoxei8_v_i16m4_m(vbool4_t mask, int16_t *base, vuint8m2_t bindex,
   return vsoxei8_v_i16m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
@@ -2633,7 +2414,6 @@ void test_vsoxei8_v_i16m8_m(vbool2_t mask, int16_t *base, vuint8m4_t bindex,
   return vsoxei8_v_i16m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -2646,7 +2426,6 @@ void test_vsoxei16_v_i16mf4_m(vbool64_t mask, int16_t *base,
   return vsoxei16_v_i16mf4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -2659,7 +2438,6 @@ void test_vsoxei16_v_i16mf2_m(vbool32_t mask, int16_t *base,
   return vsoxei16_v_i16mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -2671,7 +2449,6 @@ void test_vsoxei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex,
   return vsoxei16_v_i16m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -2683,7 +2460,6 @@ void test_vsoxei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex,
   return vsoxei16_v_i16m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -2695,7 +2471,6 @@ void test_vsoxei16_v_i16m4_m(vbool4_t mask, int16_t *base, vuint16m4_t bindex,
   return vsoxei16_v_i16m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
@@ -2707,7 +2482,6 @@ void test_vsoxei16_v_i16m8_m(vbool2_t mask, int16_t *base, vuint16m8_t bindex,
   return vsoxei16_v_i16m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -2720,7 +2494,6 @@ void test_vsoxei32_v_i16mf4_m(vbool64_t mask, int16_t *base,
   return vsoxei32_v_i16mf4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -2732,7 +2505,6 @@ void test_vsoxei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex,
   return vsoxei32_v_i16mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -2744,7 +2516,6 @@ void test_vsoxei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex,
   return vsoxei32_v_i16m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -2756,7 +2527,6 @@ void test_vsoxei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex,
   return vsoxei32_v_i16m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -2768,7 +2538,6 @@ void test_vsoxei32_v_i16m4_m(vbool4_t mask, int16_t *base, vuint32m8_t bindex,
   return vsoxei32_v_i16m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -2780,7 +2549,6 @@ void test_vsoxei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex,
   return vsoxei64_v_i16mf4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -2792,7 +2560,6 @@ void test_vsoxei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex,
   return vsoxei64_v_i16mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -2804,7 +2571,6 @@ void test_vsoxei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex,
   return vsoxei64_v_i16m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -2816,7 +2582,6 @@ void test_vsoxei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex,
   return vsoxei64_v_i16m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -2828,7 +2593,6 @@ void test_vsoxei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex,
   return vsoxei8_v_i32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -2840,7 +2604,6 @@ void test_vsoxei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex,
   return vsoxei8_v_i32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -2852,7 +2615,6 @@ void test_vsoxei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex,
   return vsoxei8_v_i32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -2864,7 +2626,6 @@ void test_vsoxei8_v_i32m4_m(vbool8_t mask, int32_t *base, vuint8m1_t bindex,
   return vsoxei8_v_i32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -2876,7 +2637,6 @@ void test_vsoxei8_v_i32m8_m(vbool4_t mask, int32_t *base, vuint8m2_t bindex,
   return vsoxei8_v_i32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -2889,7 +2649,6 @@ void test_vsoxei16_v_i32mf2_m(vbool64_t mask, int32_t *base,
   return vsoxei16_v_i32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -2901,7 +2660,6 @@ void test_vsoxei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex,
   return vsoxei16_v_i32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -2913,7 +2671,6 @@ void test_vsoxei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex,
   return vsoxei16_v_i32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -2925,7 +2682,6 @@ void test_vsoxei16_v_i32m4_m(vbool8_t mask, int32_t *base, vuint16m2_t bindex,
   return vsoxei16_v_i32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -2937,7 +2693,6 @@ void test_vsoxei16_v_i32m8_m(vbool4_t mask, int32_t *base, vuint16m4_t bindex,
   return vsoxei16_v_i32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -2950,7 +2705,6 @@ void test_vsoxei32_v_i32mf2_m(vbool64_t mask, int32_t *base,
   return vsoxei32_v_i32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -2962,7 +2716,6 @@ void test_vsoxei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex,
   return vsoxei32_v_i32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -2974,7 +2727,6 @@ void test_vsoxei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex,
   return vsoxei32_v_i32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -2986,7 +2738,6 @@ void test_vsoxei32_v_i32m4_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex,
   return vsoxei32_v_i32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -2998,7 +2749,6 @@ void test_vsoxei32_v_i32m8_m(vbool4_t mask, int32_t *base, vuint32m8_t bindex,
   return vsoxei32_v_i32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -3010,7 +2760,6 @@ void test_vsoxei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex,
   return vsoxei64_v_i32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -3022,7 +2771,6 @@ void test_vsoxei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex,
   return vsoxei64_v_i32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -3034,7 +2782,6 @@ void test_vsoxei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex,
   return vsoxei64_v_i32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -3046,7 +2793,6 @@ void test_vsoxei64_v_i32m4_m(vbool8_t mask, int32_t *base, vuint64m8_t bindex,
   return vsoxei64_v_i32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -3058,7 +2804,6 @@ void test_vsoxei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex,
   return vsoxei8_v_i64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -3070,7 +2815,6 @@ void test_vsoxei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex,
   return vsoxei8_v_i64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -3082,7 +2826,6 @@ void test_vsoxei8_v_i64m4_m(vbool16_t mask, int64_t *base, vuint8mf2_t bindex,
   return vsoxei8_v_i64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -3094,7 +2837,6 @@ void test_vsoxei8_v_i64m8_m(vbool8_t mask, int64_t *base, vuint8m1_t bindex,
   return vsoxei8_v_i64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -3106,7 +2848,6 @@ void test_vsoxei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex,
   return vsoxei16_v_i64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -3118,7 +2859,6 @@ void test_vsoxei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex,
   return vsoxei16_v_i64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -3130,7 +2870,6 @@ void test_vsoxei16_v_i64m4_m(vbool16_t mask, int64_t *base, vuint16m1_t bindex,
   return vsoxei16_v_i64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -3142,7 +2881,6 @@ void test_vsoxei16_v_i64m8_m(vbool8_t mask, int64_t *base, vuint16m2_t bindex,
   return vsoxei16_v_i64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -3154,7 +2892,6 @@ void test_vsoxei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex,
   return vsoxei32_v_i64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -3166,7 +2903,6 @@ void test_vsoxei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex,
   return vsoxei32_v_i64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -3178,7 +2914,6 @@ void test_vsoxei32_v_i64m4_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex,
   return vsoxei32_v_i64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -3190,7 +2925,6 @@ void test_vsoxei32_v_i64m8_m(vbool8_t mask, int64_t *base, vuint32m4_t bindex,
   return vsoxei32_v_i64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -3202,7 +2936,6 @@ void test_vsoxei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex,
   return vsoxei64_v_i64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -3214,7 +2947,6 @@ void test_vsoxei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex,
   return vsoxei64_v_i64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -3226,7 +2958,6 @@ void test_vsoxei64_v_i64m4_m(vbool16_t mask, int64_t *base, vuint64m4_t bindex,
   return vsoxei64_v_i64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -3238,7 +2969,6 @@ void test_vsoxei64_v_i64m8_m(vbool8_t mask, int64_t *base, vuint64m8_t bindex,
   return vsoxei64_v_i64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -3250,7 +2980,6 @@ void test_vsoxei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex,
   return vsoxei8_v_u8mf8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -3262,7 +2991,6 @@ void test_vsoxei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex,
   return vsoxei8_v_u8mf4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -3274,7 +3002,6 @@ void test_vsoxei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex,
   return vsoxei8_v_u8mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -3286,7 +3013,6 @@ void test_vsoxei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex,
   return vsoxei8_v_u8m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -3298,7 +3024,6 @@ void test_vsoxei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex,
   return vsoxei8_v_u8m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
@@ -3310,7 +3035,6 @@ void test_vsoxei8_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex,
   return vsoxei8_v_u8m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
@@ -3322,7 +3046,6 @@ void test_vsoxei8_v_u8m8_m(vbool1_t mask, uint8_t *base, vuint8m8_t bindex,
   return vsoxei8_v_u8m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -3334,7 +3057,6 @@ void test_vsoxei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex,
   return vsoxei16_v_u8mf8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -3346,7 +3068,6 @@ void test_vsoxei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex,
   return vsoxei16_v_u8mf4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -3358,7 +3079,6 @@ void test_vsoxei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex,
   return vsoxei16_v_u8mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -3370,7 +3090,6 @@ void test_vsoxei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex,
   return vsoxei16_v_u8m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -3382,7 +3101,6 @@ void test_vsoxei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex,
   return vsoxei16_v_u8m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
@@ -3394,7 +3112,6 @@ void test_vsoxei16_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint16m8_t bindex,
   return vsoxei16_v_u8m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -3406,7 +3123,6 @@ void test_vsoxei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex,
   return vsoxei32_v_u8mf8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -3418,7 +3134,6 @@ void test_vsoxei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex,
   return vsoxei32_v_u8mf4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -3430,7 +3145,6 @@ void test_vsoxei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex,
   return vsoxei32_v_u8mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -3442,7 +3156,6 @@ void test_vsoxei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex,
   return vsoxei32_v_u8m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -3454,7 +3167,6 @@ void test_vsoxei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex,
   return vsoxei32_v_u8m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -3466,7 +3178,6 @@ void test_vsoxei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex,
   return vsoxei64_v_u8mf8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -3478,7 +3189,6 @@ void test_vsoxei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex,
   return vsoxei64_v_u8mf4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -3490,7 +3200,6 @@ void test_vsoxei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex,
   return vsoxei64_v_u8mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -3502,7 +3211,6 @@ void test_vsoxei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex,
   return vsoxei64_v_u8m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -3514,7 +3222,6 @@ void test_vsoxei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex,
   return vsoxei8_v_u16mf4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -3526,7 +3233,6 @@ void test_vsoxei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex,
   return vsoxei8_v_u16mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -3538,7 +3244,6 @@ void test_vsoxei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex,
   return vsoxei8_v_u16m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -3550,7 +3255,6 @@ void test_vsoxei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex,
   return vsoxei8_v_u16m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -3562,7 +3266,6 @@ void test_vsoxei8_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint8m2_t bindex,
   return vsoxei8_v_u16m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
@@ -3574,7 +3277,6 @@ void test_vsoxei8_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint8m4_t bindex,
   return vsoxei8_v_u16m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -3587,7 +3289,6 @@ void test_vsoxei16_v_u16mf4_m(vbool64_t mask, uint16_t *base,
   return vsoxei16_v_u16mf4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -3600,7 +3301,6 @@ void test_vsoxei16_v_u16mf2_m(vbool32_t mask, uint16_t *base,
   return vsoxei16_v_u16mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -3612,7 +3312,6 @@ void test_vsoxei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex,
   return vsoxei16_v_u16m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -3624,7 +3323,6 @@ void test_vsoxei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex,
   return vsoxei16_v_u16m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -3636,7 +3334,6 @@ void test_vsoxei16_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint16m4_t bindex,
   return vsoxei16_v_u16m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
@@ -3648,7 +3345,6 @@ void test_vsoxei16_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint16m8_t bindex,
   return vsoxei16_v_u16m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -3661,7 +3357,6 @@ void test_vsoxei32_v_u16mf4_m(vbool64_t mask, uint16_t *base,
   return vsoxei32_v_u16mf4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -3674,7 +3369,6 @@ void test_vsoxei32_v_u16mf2_m(vbool32_t mask, uint16_t *base,
   return vsoxei32_v_u16mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -3686,7 +3380,6 @@ void test_vsoxei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex,
   return vsoxei32_v_u16m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -3698,7 +3391,6 @@ void test_vsoxei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex,
   return vsoxei32_v_u16m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -3710,7 +3402,6 @@ void test_vsoxei32_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint32m8_t bindex,
   return vsoxei32_v_u16m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -3723,7 +3414,6 @@ void test_vsoxei64_v_u16mf4_m(vbool64_t mask, uint16_t *base,
   return vsoxei64_v_u16mf4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -3736,7 +3426,6 @@ void test_vsoxei64_v_u16mf2_m(vbool32_t mask, uint16_t *base,
   return vsoxei64_v_u16mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -3748,7 +3437,6 @@ void test_vsoxei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex,
   return vsoxei64_v_u16m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -3760,7 +3448,6 @@ void test_vsoxei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex,
   return vsoxei64_v_u16m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -3772,7 +3459,6 @@ void test_vsoxei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex,
   return vsoxei8_v_u32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -3784,7 +3470,6 @@ void test_vsoxei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex,
   return vsoxei8_v_u32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -3796,7 +3481,6 @@ void test_vsoxei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex,
   return vsoxei8_v_u32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -3808,7 +3492,6 @@ void test_vsoxei8_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex,
   return vsoxei8_v_u32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -3820,7 +3503,6 @@ void test_vsoxei8_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint8m2_t bindex,
   return vsoxei8_v_u32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -3833,7 +3515,6 @@ void test_vsoxei16_v_u32mf2_m(vbool64_t mask, uint32_t *base,
   return vsoxei16_v_u32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -3846,7 +3527,6 @@ void test_vsoxei16_v_u32m1_m(vbool32_t mask, uint32_t *base,
   return vsoxei16_v_u32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -3858,7 +3538,6 @@ void test_vsoxei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex,
   return vsoxei16_v_u32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -3870,7 +3549,6 @@ void test_vsoxei16_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint16m2_t bindex,
   return vsoxei16_v_u32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -3882,7 +3560,6 @@ void test_vsoxei16_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint16m4_t bindex,
   return vsoxei16_v_u32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -3895,7 +3572,6 @@ void test_vsoxei32_v_u32mf2_m(vbool64_t mask, uint32_t *base,
   return vsoxei32_v_u32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -3907,7 +3583,6 @@ void test_vsoxei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex,
   return vsoxei32_v_u32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -3919,7 +3594,6 @@ void test_vsoxei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex,
   return vsoxei32_v_u32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -3931,7 +3605,6 @@ void test_vsoxei32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex,
   return vsoxei32_v_u32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -3943,7 +3616,6 @@ void test_vsoxei32_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint32m8_t bindex,
   return vsoxei32_v_u32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -3956,7 +3628,6 @@ void test_vsoxei64_v_u32mf2_m(vbool64_t mask, uint32_t *base,
   return vsoxei64_v_u32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -3968,7 +3639,6 @@ void test_vsoxei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex,
   return vsoxei64_v_u32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -3980,7 +3650,6 @@ void test_vsoxei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex,
   return vsoxei64_v_u32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -3992,7 +3661,6 @@ void test_vsoxei64_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint64m8_t bindex,
   return vsoxei64_v_u32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -4004,7 +3672,6 @@ void test_vsoxei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex,
   return vsoxei8_v_u64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -4016,7 +3683,6 @@ void test_vsoxei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex,
   return vsoxei8_v_u64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -4028,7 +3694,6 @@ void test_vsoxei8_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint8mf2_t bindex,
   return vsoxei8_v_u64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -4040,7 +3705,6 @@ void test_vsoxei8_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint8m1_t bindex,
   return vsoxei8_v_u64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -4053,7 +3717,6 @@ void test_vsoxei16_v_u64m1_m(vbool64_t mask, uint64_t *base,
   return vsoxei16_v_u64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -4066,7 +3729,6 @@ void test_vsoxei16_v_u64m2_m(vbool32_t mask, uint64_t *base,
   return vsoxei16_v_u64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -4078,7 +3740,6 @@ void test_vsoxei16_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint16m1_t bindex,
   return vsoxei16_v_u64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -4090,7 +3751,6 @@ void test_vsoxei16_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint16m2_t bindex,
   return vsoxei16_v_u64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -4103,7 +3763,6 @@ void test_vsoxei32_v_u64m1_m(vbool64_t mask, uint64_t *base,
   return vsoxei32_v_u64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -4115,7 +3774,6 @@ void test_vsoxei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex,
   return vsoxei32_v_u64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -4127,7 +3785,6 @@ void test_vsoxei32_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex,
   return vsoxei32_v_u64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -4139,7 +3796,6 @@ void test_vsoxei32_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint32m4_t bindex,
   return vsoxei32_v_u64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -4151,7 +3807,6 @@ void test_vsoxei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex,
   return vsoxei64_v_u64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -4163,7 +3818,6 @@ void test_vsoxei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex,
   return vsoxei64_v_u64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -4175,7 +3829,6 @@ void test_vsoxei64_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint64m4_t bindex,
   return vsoxei64_v_u64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -4187,7 +3840,6 @@ void test_vsoxei64_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint64m8_t bindex,
   return vsoxei64_v_u64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -4199,7 +3851,6 @@ void test_vsoxei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex,
   return vsoxei8_v_f32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -4211,7 +3862,6 @@ void test_vsoxei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex,
   return vsoxei8_v_f32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -4223,7 +3873,6 @@ void test_vsoxei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex,
   return vsoxei8_v_f32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -4235,7 +3884,6 @@ void test_vsoxei8_v_f32m4_m(vbool8_t mask, float *base, vuint8m1_t bindex,
   return vsoxei8_v_f32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
@@ -4247,7 +3895,6 @@ void test_vsoxei8_v_f32m8_m(vbool4_t mask, float *base, vuint8m2_t bindex,
   return vsoxei8_v_f32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -4259,7 +3906,6 @@ void test_vsoxei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex,
   return vsoxei16_v_f32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -4271,7 +3917,6 @@ void test_vsoxei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex,
   return vsoxei16_v_f32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -4283,7 +3928,6 @@ void test_vsoxei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex,
   return vsoxei16_v_f32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -4295,7 +3939,6 @@ void test_vsoxei16_v_f32m4_m(vbool8_t mask, float *base, vuint16m2_t bindex,
   return vsoxei16_v_f32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
@@ -4307,7 +3950,6 @@ void test_vsoxei16_v_f32m8_m(vbool4_t mask, float *base, vuint16m4_t bindex,
   return vsoxei16_v_f32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -4319,7 +3961,6 @@ void test_vsoxei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex,
   return vsoxei32_v_f32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -4331,7 +3972,6 @@ void test_vsoxei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex,
   return vsoxei32_v_f32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -4343,7 +3983,6 @@ void test_vsoxei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex,
   return vsoxei32_v_f32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -4355,7 +3994,6 @@ void test_vsoxei32_v_f32m4_m(vbool8_t mask, float *base, vuint32m4_t bindex,
   return vsoxei32_v_f32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
@@ -4367,7 +4005,6 @@ void test_vsoxei32_v_f32m8_m(vbool4_t mask, float *base, vuint32m8_t bindex,
   return vsoxei32_v_f32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -4379,7 +4016,6 @@ void test_vsoxei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex,
   return vsoxei64_v_f32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -4391,7 +4027,6 @@ void test_vsoxei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex,
   return vsoxei64_v_f32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -4403,7 +4038,6 @@ void test_vsoxei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex,
   return vsoxei64_v_f32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -4415,7 +4049,6 @@ void test_vsoxei64_v_f32m4_m(vbool8_t mask, float *base, vuint64m8_t bindex,
   return vsoxei64_v_f32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -4427,7 +4060,6 @@ void test_vsoxei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex,
   return vsoxei8_v_f64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -4439,7 +4071,6 @@ void test_vsoxei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex,
   return vsoxei8_v_f64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -4451,7 +4082,6 @@ void test_vsoxei8_v_f64m4_m(vbool16_t mask, double *base, vuint8mf2_t bindex,
   return vsoxei8_v_f64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
@@ -4463,7 +4093,6 @@ void test_vsoxei8_v_f64m8_m(vbool8_t mask, double *base, vuint8m1_t bindex,
   return vsoxei8_v_f64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -4475,7 +4104,6 @@ void test_vsoxei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex,
   return vsoxei16_v_f64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -4487,7 +4115,6 @@ void test_vsoxei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bindex,
   return vsoxei16_v_f64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -4499,7 +4126,6 @@ void test_vsoxei16_v_f64m4_m(vbool16_t mask, double *base, vuint16m1_t bindex,
   return vsoxei16_v_f64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
@@ -4511,7 +4137,6 @@ void test_vsoxei16_v_f64m8_m(vbool8_t mask, double *base, vuint16m2_t bindex,
   return vsoxei16_v_f64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -4523,7 +4148,6 @@ void test_vsoxei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex,
   return vsoxei32_v_f64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -4535,7 +4159,6 @@ void test_vsoxei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t bindex,
   return vsoxei32_v_f64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -4547,7 +4170,6 @@ void test_vsoxei32_v_f64m4_m(vbool16_t mask, double *base, vuint32m2_t bindex,
   return vsoxei32_v_f64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
@@ -4559,7 +4181,6 @@ void test_vsoxei32_v_f64m8_m(vbool8_t mask, double *base, vuint32m4_t bindex,
   return vsoxei32_v_f64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -4571,7 +4192,6 @@ void test_vsoxei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex,
   return vsoxei64_v_f64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -4583,7 +4203,6 @@ void test_vsoxei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t bindex,
   return vsoxei64_v_f64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -4595,7 +4214,6 @@ void test_vsoxei64_v_f64m4_m(vbool16_t mask, double *base, vuint64m4_t bindex,
   return vsoxei64_v_f64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsra.c
index 7057e61e8cd23..250304d8e07fc 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsra.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsra.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint8mf8_t test_vsra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
   return vsra_vv_i8mf8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.i64.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint8mf8_t test_vsra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) {
   return vsra_vx_i8mf8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint8mf4_t test_vsra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
   return vsra_vv_i8mf4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.i64.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint8mf4_t test_vsra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
   return vsra_vx_i8mf4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint8mf2_t test_vsra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
   return vsra_vv_i8mf2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.i64.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint8mf2_t test_vsra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) {
   return vsra_vx_i8mf2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vint8m1_t test_vsra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) {
   return vsra_vv_i8m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.i64.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vint8m1_t test_vsra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) {
   return vsra_vx_i8m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vint8m2_t test_vsra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) {
   return vsra_vv_i8m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.i64.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vint8m2_t test_vsra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) {
   return vsra_vx_i8m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vint8m4_t test_vsra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) {
   return vsra_vv_i8m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.i64.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vint8m4_t test_vsra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) {
   return vsra_vx_i8m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vint8m8_t test_vsra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) {
   return vsra_vv_i8m8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.i64.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vint8m8_t test_vsra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) {
   return vsra_vx_i8m8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vint16mf4_t test_vsra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl)
   return vsra_vv_i16mf4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vint16mf4_t test_vsra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) {
   return vsra_vx_i16mf4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vint16mf2_t test_vsra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl)
   return vsra_vv_i16mf2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vint16mf2_t test_vsra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) {
   return vsra_vx_i16mf2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vint16m1_t test_vsra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) {
   return vsra_vv_i16m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vint16m1_t test_vsra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) {
   return vsra_vx_i16m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vint16m2_t test_vsra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) {
   return vsra_vv_i16m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vint16m2_t test_vsra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) {
   return vsra_vx_i16m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vint16m4_t test_vsra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) {
   return vsra_vv_i16m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vint16m4_t test_vsra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) {
   return vsra_vx_i16m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vint16m8_t test_vsra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) {
   return vsra_vv_i16m8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vint16m8_t test_vsra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) {
   return vsra_vx_i16m8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vint32mf2_t test_vsra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl)
   return vsra_vv_i32mf2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vint32mf2_t test_vsra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) {
   return vsra_vx_i32mf2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vint32m1_t test_vsra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) {
   return vsra_vv_i32m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -304,7 +274,6 @@ vint32m1_t test_vsra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) {
   return vsra_vx_i32m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -314,7 +283,6 @@ vint32m2_t test_vsra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) {
   return vsra_vv_i32m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -324,7 +292,6 @@ vint32m2_t test_vsra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) {
   return vsra_vx_i32m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -334,7 +301,6 @@ vint32m4_t test_vsra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) {
   return vsra_vv_i32m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -344,7 +310,6 @@ vint32m4_t test_vsra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) {
   return vsra_vx_i32m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -354,7 +319,6 @@ vint32m8_t test_vsra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) {
   return vsra_vv_i32m8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -364,7 +328,6 @@ vint32m8_t test_vsra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) {
   return vsra_vx_i32m8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -374,7 +337,6 @@ vint64m1_t test_vsra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) {
   return vsra_vv_i64m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -384,7 +346,6 @@ vint64m1_t test_vsra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) {
   return vsra_vx_i64m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -394,7 +355,6 @@ vint64m2_t test_vsra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) {
   return vsra_vv_i64m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -404,7 +364,6 @@ vint64m2_t test_vsra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) {
   return vsra_vx_i64m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -414,7 +373,6 @@ vint64m4_t test_vsra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
   return vsra_vv_i64m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -424,7 +382,6 @@ vint64m4_t test_vsra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
   return vsra_vx_i64m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -434,7 +391,6 @@ vint64m8_t test_vsra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
   return vsra_vv_i64m8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -444,7 +400,6 @@ vint64m8_t test_vsra_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) {
   return vsra_vx_i64m8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -454,7 +409,6 @@ vint8mf8_t test_vsra_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t
   return vsra_vv_i8mf8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -464,7 +418,6 @@ vint8mf8_t test_vsra_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t
   return vsra_vx_i8mf8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -474,7 +427,6 @@ vint8mf4_t test_vsra_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t
   return vsra_vv_i8mf4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -484,7 +436,6 @@ vint8mf4_t test_vsra_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t
   return vsra_vx_i8mf4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -494,7 +445,6 @@ vint8mf2_t test_vsra_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t
   return vsra_vv_i8mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -504,7 +454,6 @@ vint8mf2_t test_vsra_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t
   return vsra_vx_i8mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -514,7 +463,6 @@ vint8m1_t test_vsra_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1,
   return vsra_vv_i8m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -524,7 +472,6 @@ vint8m1_t test_vsra_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1,
   return vsra_vx_i8m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -534,7 +481,6 @@ vint8m2_t test_vsra_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1,
   return vsra_vv_i8m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -544,7 +490,6 @@ vint8m2_t test_vsra_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1,
   return vsra_vx_i8m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -554,7 +499,6 @@ vint8m4_t test_vsra_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1,
   return vsra_vv_i8m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -564,7 +508,6 @@ vint8m4_t test_vsra_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1,
   return vsra_vx_i8m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -574,7 +517,6 @@ vint8m8_t test_vsra_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1,
   return vsra_vv_i8m8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -584,7 +526,6 @@ vint8m8_t test_vsra_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1,
   return vsra_vx_i8m8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -594,7 +535,6 @@ vint16mf4_t test_vsra_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16m
   return vsra_vv_i16mf4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -604,7 +544,6 @@ vint16mf4_t test_vsra_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16m
   return vsra_vx_i16mf4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -614,7 +553,6 @@ vint16mf2_t test_vsra_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16m
   return vsra_vv_i16mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -624,7 +562,6 @@ vint16mf2_t test_vsra_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16m
   return vsra_vx_i16mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -634,7 +571,6 @@ vint16m1_t test_vsra_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t
   return vsra_vv_i16m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -644,7 +580,6 @@ vint16m1_t test_vsra_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t
   return vsra_vx_i16m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -654,7 +589,6 @@ vint16m2_t test_vsra_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
   return vsra_vv_i16m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -664,7 +598,6 @@ vint16m2_t test_vsra_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
   return vsra_vx_i16m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -674,7 +607,6 @@ vint16m4_t test_vsra_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
   return vsra_vv_i16m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -684,7 +616,6 @@ vint16m4_t test_vsra_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
   return vsra_vx_i16m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -694,7 +625,6 @@ vint16m8_t test_vsra_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
   return vsra_vv_i16m8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -704,7 +634,6 @@ vint16m8_t test_vsra_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
   return vsra_vx_i16m8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -714,7 +643,6 @@ vint32mf2_t test_vsra_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32m
   return vsra_vv_i32mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -724,7 +652,6 @@ vint32mf2_t test_vsra_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32m
   return vsra_vx_i32mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -734,7 +661,6 @@ vint32m1_t test_vsra_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t
   return vsra_vv_i32m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -744,7 +670,6 @@ vint32m1_t test_vsra_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t
   return vsra_vx_i32m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -754,7 +679,6 @@ vint32m2_t test_vsra_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t
   return vsra_vv_i32m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -764,7 +688,6 @@ vint32m2_t test_vsra_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t
   return vsra_vx_i32m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -774,7 +697,6 @@ vint32m4_t test_vsra_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
   return vsra_vv_i32m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -784,7 +706,6 @@ vint32m4_t test_vsra_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
   return vsra_vx_i32m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -794,7 +715,6 @@ vint32m8_t test_vsra_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
   return vsra_vv_i32m8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -804,7 +724,6 @@ vint32m8_t test_vsra_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
   return vsra_vx_i32m8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsra.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -814,7 +733,6 @@ vint64m1_t test_vsra_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t
   return vsra_vv_i64m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsra.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -824,7 +742,6 @@ vint64m1_t test_vsra_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t
   return vsra_vx_i64m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsra.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -834,7 +751,6 @@ vint64m2_t test_vsra_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t
   return vsra_vv_i64m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsra.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -844,7 +760,6 @@ vint64m2_t test_vsra_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t
   return vsra_vx_i64m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsra.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -854,7 +769,6 @@ vint64m4_t test_vsra_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t
   return vsra_vv_i64m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsra.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -864,7 +778,6 @@ vint64m4_t test_vsra_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t
   return vsra_vx_i64m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsra.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -874,7 +787,6 @@ vint64m8_t test_vsra_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
   return vsra_vv_i64m8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsra.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsrl.c
index 3d736d6071346..57ff0e5713b9a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsrl.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsrl.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vuint8mf8_t test_vsrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
   return vsrl_vv_u8mf8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.i64.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vuint8mf8_t test_vsrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
   return vsrl_vx_u8mf8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vuint8mf4_t test_vsrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
   return vsrl_vv_u8mf4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.i64.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vuint8mf4_t test_vsrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
   return vsrl_vx_u8mf4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vuint8mf2_t test_vsrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
   return vsrl_vv_u8mf2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.i64.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vuint8mf2_t test_vsrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
   return vsrl_vx_u8mf2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vuint8m1_t test_vsrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
   return vsrl_vv_u8m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.i64.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vuint8m1_t test_vsrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
   return vsrl_vx_u8m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vuint8m2_t test_vsrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
   return vsrl_vv_u8m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.i64.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vuint8m2_t test_vsrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) {
   return vsrl_vx_u8m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vuint8m4_t test_vsrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
   return vsrl_vv_u8m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.i64.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vuint8m4_t test_vsrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) {
   return vsrl_vx_u8m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vuint8m8_t test_vsrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
   return vsrl_vv_u8m8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.i64.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vuint8m8_t test_vsrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) {
   return vsrl_vx_u8m8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vuint16mf4_t test_vsrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl
   return vsrl_vv_u16mf4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vuint16mf4_t test_vsrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) {
   return vsrl_vx_u16mf4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vuint16mf2_t test_vsrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl
   return vsrl_vv_u16mf2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vuint16mf2_t test_vsrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) {
   return vsrl_vx_u16mf2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vuint16m1_t test_vsrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
   return vsrl_vv_u16m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vuint16m1_t test_vsrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) {
   return vsrl_vx_u16m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vuint16m2_t test_vsrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
   return vsrl_vv_u16m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vuint16m2_t test_vsrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) {
   return vsrl_vx_u16m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vuint16m4_t test_vsrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
   return vsrl_vv_u16m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vuint16m4_t test_vsrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) {
   return vsrl_vx_u16m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vuint16m8_t test_vsrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
   return vsrl_vv_u16m8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vuint16m8_t test_vsrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) {
   return vsrl_vx_u16m8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vuint32mf2_t test_vsrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl
   return vsrl_vv_u32mf2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vuint32mf2_t test_vsrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) {
   return vsrl_vx_u32mf2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vuint32m1_t test_vsrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
   return vsrl_vv_u32m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -304,7 +274,6 @@ vuint32m1_t test_vsrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
   return vsrl_vx_u32m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -314,7 +283,6 @@ vuint32m2_t test_vsrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
   return vsrl_vv_u32m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -324,7 +292,6 @@ vuint32m2_t test_vsrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) {
   return vsrl_vx_u32m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -334,7 +301,6 @@ vuint32m4_t test_vsrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
   return vsrl_vv_u32m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -344,7 +310,6 @@ vuint32m4_t test_vsrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) {
   return vsrl_vx_u32m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -354,7 +319,6 @@ vuint32m8_t test_vsrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
   return vsrl_vv_u32m8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -364,7 +328,6 @@ vuint32m8_t test_vsrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) {
   return vsrl_vx_u32m8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -374,7 +337,6 @@ vuint64m1_t test_vsrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
   return vsrl_vv_u64m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -384,7 +346,6 @@ vuint64m1_t test_vsrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
   return vsrl_vx_u64m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -394,7 +355,6 @@ vuint64m2_t test_vsrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
   return vsrl_vv_u64m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -404,7 +364,6 @@ vuint64m2_t test_vsrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
   return vsrl_vx_u64m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -414,7 +373,6 @@ vuint64m4_t test_vsrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
   return vsrl_vv_u64m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -424,7 +382,6 @@ vuint64m4_t test_vsrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
   return vsrl_vx_u64m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -434,7 +391,6 @@ vuint64m8_t test_vsrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
   return vsrl_vv_u64m8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -444,7 +400,6 @@ vuint64m8_t test_vsrl_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) {
   return vsrl_vx_u64m8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -454,7 +409,6 @@ vuint8mf8_t test_vsrl_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf
   return vsrl_vv_u8mf8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -464,7 +418,6 @@ vuint8mf8_t test_vsrl_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf
   return vsrl_vx_u8mf8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -474,7 +427,6 @@ vuint8mf4_t test_vsrl_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf
   return vsrl_vv_u8mf4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -484,7 +436,6 @@ vuint8mf4_t test_vsrl_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf
   return vsrl_vx_u8mf4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -494,7 +445,6 @@ vuint8mf2_t test_vsrl_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf
   return vsrl_vv_u8mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -504,7 +454,6 @@ vuint8mf2_t test_vsrl_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf
   return vsrl_vx_u8mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -514,7 +463,6 @@ vuint8m1_t test_vsrl_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t o
   return vsrl_vv_u8m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -524,7 +472,6 @@ vuint8m1_t test_vsrl_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t o
   return vsrl_vx_u8m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -534,7 +481,6 @@ vuint8m2_t test_vsrl_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t o
   return vsrl_vv_u8m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -544,7 +490,6 @@ vuint8m2_t test_vsrl_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t o
   return vsrl_vx_u8m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -554,7 +499,6 @@ vuint8m4_t test_vsrl_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t o
   return vsrl_vv_u8m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -564,7 +508,6 @@ vuint8m4_t test_vsrl_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t o
   return vsrl_vx_u8m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -574,7 +517,6 @@ vuint8m8_t test_vsrl_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t o
   return vsrl_vv_u8m8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -584,7 +526,6 @@ vuint8m8_t test_vsrl_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t o
   return vsrl_vx_u8m8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -594,7 +535,6 @@ vuint16mf4_t test_vsrl_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint
   return vsrl_vv_u16mf4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -604,7 +544,6 @@ vuint16mf4_t test_vsrl_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint
   return vsrl_vx_u16mf4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -614,7 +553,6 @@ vuint16mf2_t test_vsrl_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint
   return vsrl_vv_u16mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -624,7 +562,6 @@ vuint16mf2_t test_vsrl_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint
   return vsrl_vx_u16mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -634,7 +571,6 @@ vuint16m1_t test_vsrl_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m
   return vsrl_vv_u16m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -644,7 +580,6 @@ vuint16m1_t test_vsrl_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m
   return vsrl_vx_u16m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -654,7 +589,6 @@ vuint16m2_t test_vsrl_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2
   return vsrl_vv_u16m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -664,7 +598,6 @@ vuint16m2_t test_vsrl_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2
   return vsrl_vx_u16m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -674,7 +607,6 @@ vuint16m4_t test_vsrl_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4
   return vsrl_vv_u16m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -684,7 +616,6 @@ vuint16m4_t test_vsrl_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4
   return vsrl_vx_u16m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -694,7 +625,6 @@ vuint16m8_t test_vsrl_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8
   return vsrl_vv_u16m8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -704,7 +634,6 @@ vuint16m8_t test_vsrl_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8
   return vsrl_vx_u16m8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -714,7 +643,6 @@ vuint32mf2_t test_vsrl_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint
   return vsrl_vv_u32mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -724,7 +652,6 @@ vuint32mf2_t test_vsrl_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint
   return vsrl_vx_u32mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -734,7 +661,6 @@ vuint32m1_t test_vsrl_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m
   return vsrl_vv_u32m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -744,7 +670,6 @@ vuint32m1_t test_vsrl_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m
   return vsrl_vx_u32m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -754,7 +679,6 @@ vuint32m2_t test_vsrl_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m
   return vsrl_vv_u32m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -764,7 +688,6 @@ vuint32m2_t test_vsrl_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m
   return vsrl_vx_u32m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -774,7 +697,6 @@ vuint32m4_t test_vsrl_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4
   return vsrl_vv_u32m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -784,7 +706,6 @@ vuint32m4_t test_vsrl_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4
   return vsrl_vx_u32m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -794,7 +715,6 @@ vuint32m8_t test_vsrl_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8
   return vsrl_vv_u32m8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -804,7 +724,6 @@ vuint32m8_t test_vsrl_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8
   return vsrl_vx_u32m8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -814,7 +733,6 @@ vuint64m1_t test_vsrl_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m
   return vsrl_vv_u64m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -824,7 +742,6 @@ vuint64m1_t test_vsrl_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m
   return vsrl_vx_u64m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -834,7 +751,6 @@ vuint64m2_t test_vsrl_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m
   return vsrl_vv_u64m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -844,7 +760,6 @@ vuint64m2_t test_vsrl_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m
   return vsrl_vx_u64m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -854,7 +769,6 @@ vuint64m4_t test_vsrl_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m
   return vsrl_vv_u64m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -864,7 +778,6 @@ vuint64m4_t test_vsrl_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m
   return vsrl_vx_u64m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -874,7 +787,6 @@ vuint64m8_t test_vsrl_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8
   return vsrl_vv_u64m8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsse.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsse.c
index 7202e25125b0b..921d9e2d0b497 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsse.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsse.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vsse8_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -17,7 +16,6 @@ void test_vsse8_v_i8mf8(int8_t *base, ptrdiff_t bstride, vint8mf8_t value,
   return vsse8_v_i8mf8(base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse8_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -29,7 +27,6 @@ void test_vsse8_v_i8mf4(int8_t *base, ptrdiff_t bstride, vint8mf4_t value,
   return vsse8_v_i8mf4(base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse8_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -41,7 +38,6 @@ void test_vsse8_v_i8mf2(int8_t *base, ptrdiff_t bstride, vint8mf2_t value,
   return vsse8_v_i8mf2(base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse8_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -53,7 +49,6 @@ void test_vsse8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t value,
   return vsse8_v_i8m1(base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse8_v_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -65,7 +60,6 @@ void test_vsse8_v_i8m2(int8_t *base, ptrdiff_t bstride, vint8m2_t value,
   return vsse8_v_i8m2(base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse8_v_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
@@ -77,7 +71,6 @@ void test_vsse8_v_i8m4(int8_t *base, ptrdiff_t bstride, vint8m4_t value,
   return vsse8_v_i8m4(base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse8_v_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
@@ -89,7 +82,6 @@ void test_vsse8_v_i8m8(int8_t *base, ptrdiff_t bstride, vint8m8_t value,
   return vsse8_v_i8m8(base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse16_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -101,7 +93,6 @@ void test_vsse16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t value,
   return vsse16_v_i16mf4(base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse16_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -113,7 +104,6 @@ void test_vsse16_v_i16mf2(int16_t *base, ptrdiff_t bstride, vint16mf2_t value,
   return vsse16_v_i16mf2(base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse16_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -125,7 +115,6 @@ void test_vsse16_v_i16m1(int16_t *base, ptrdiff_t bstride, vint16m1_t value,
   return vsse16_v_i16m1(base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse16_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -137,7 +126,6 @@ void test_vsse16_v_i16m2(int16_t *base, ptrdiff_t bstride, vint16m2_t value,
   return vsse16_v_i16m2(base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse16_v_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -149,7 +137,6 @@ void test_vsse16_v_i16m4(int16_t *base, ptrdiff_t bstride, vint16m4_t value,
   return vsse16_v_i16m4(base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse16_v_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
@@ -161,7 +148,6 @@ void test_vsse16_v_i16m8(int16_t *base, ptrdiff_t bstride, vint16m8_t value,
   return vsse16_v_i16m8(base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse32_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -173,7 +159,6 @@ void test_vsse32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t value,
   return vsse32_v_i32mf2(base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse32_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -185,7 +170,6 @@ void test_vsse32_v_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1_t value,
   return vsse32_v_i32m1(base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse32_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -197,7 +181,6 @@ void test_vsse32_v_i32m2(int32_t *base, ptrdiff_t bstride, vint32m2_t value,
   return vsse32_v_i32m2(base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse32_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -209,7 +192,6 @@ void test_vsse32_v_i32m4(int32_t *base, ptrdiff_t bstride, vint32m4_t value,
   return vsse32_v_i32m4(base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse32_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -221,7 +203,6 @@ void test_vsse32_v_i32m8(int32_t *base, ptrdiff_t bstride, vint32m8_t value,
   return vsse32_v_i32m8(base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse64_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -233,7 +214,6 @@ void test_vsse64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t value,
   return vsse64_v_i64m1(base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse64_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -245,7 +225,6 @@ void test_vsse64_v_i64m2(int64_t *base, ptrdiff_t bstride, vint64m2_t value,
   return vsse64_v_i64m2(base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse64_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -257,7 +236,6 @@ void test_vsse64_v_i64m4(int64_t *base, ptrdiff_t bstride, vint64m4_t value,
   return vsse64_v_i64m4(base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse64_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -269,7 +247,6 @@ void test_vsse64_v_i64m8(int64_t *base, ptrdiff_t bstride, vint64m8_t value,
   return vsse64_v_i64m8(base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse8_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -281,7 +258,6 @@ void test_vsse8_v_u8mf8(uint8_t *base, ptrdiff_t bstride, vuint8mf8_t value,
   return vsse8_v_u8mf8(base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse8_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -293,7 +269,6 @@ void test_vsse8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t value,
   return vsse8_v_u8mf4(base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse8_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -305,7 +280,6 @@ void test_vsse8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t value,
   return vsse8_v_u8mf2(base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse8_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -317,7 +291,6 @@ void test_vsse8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t value,
   return vsse8_v_u8m1(base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse8_v_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -329,7 +302,6 @@ void test_vsse8_v_u8m2(uint8_t *base, ptrdiff_t bstride, vuint8m2_t value,
   return vsse8_v_u8m2(base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse8_v_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
@@ -341,7 +313,6 @@ void test_vsse8_v_u8m4(uint8_t *base, ptrdiff_t bstride, vuint8m4_t value,
   return vsse8_v_u8m4(base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse8_v_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
@@ -353,7 +324,6 @@ void test_vsse8_v_u8m8(uint8_t *base, ptrdiff_t bstride, vuint8m8_t value,
   return vsse8_v_u8m8(base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse16_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -365,7 +335,6 @@ void test_vsse16_v_u16mf4(uint16_t *base, ptrdiff_t bstride, vuint16mf4_t value,
   return vsse16_v_u16mf4(base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse16_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -377,7 +346,6 @@ void test_vsse16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, vuint16mf2_t value,
   return vsse16_v_u16mf2(base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse16_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -389,7 +357,6 @@ void test_vsse16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t value,
   return vsse16_v_u16m1(base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse16_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -401,7 +368,6 @@ void test_vsse16_v_u16m2(uint16_t *base, ptrdiff_t bstride, vuint16m2_t value,
   return vsse16_v_u16m2(base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse16_v_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -413,7 +379,6 @@ void test_vsse16_v_u16m4(uint16_t *base, ptrdiff_t bstride, vuint16m4_t value,
   return vsse16_v_u16m4(base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse16_v_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
@@ -425,7 +390,6 @@ void test_vsse16_v_u16m8(uint16_t *base, ptrdiff_t bstride, vuint16m8_t value,
   return vsse16_v_u16m8(base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse32_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -437,7 +401,6 @@ void test_vsse32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, vuint32mf2_t value,
   return vsse32_v_u32mf2(base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse32_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -449,7 +412,6 @@ void test_vsse32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t value,
   return vsse32_v_u32m1(base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse32_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -461,7 +423,6 @@ void test_vsse32_v_u32m2(uint32_t *base, ptrdiff_t bstride, vuint32m2_t value,
   return vsse32_v_u32m2(base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse32_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -473,7 +434,6 @@ void test_vsse32_v_u32m4(uint32_t *base, ptrdiff_t bstride, vuint32m4_t value,
   return vsse32_v_u32m4(base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse32_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -485,7 +445,6 @@ void test_vsse32_v_u32m8(uint32_t *base, ptrdiff_t bstride, vuint32m8_t value,
   return vsse32_v_u32m8(base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse64_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -497,7 +456,6 @@ void test_vsse64_v_u64m1(uint64_t *base, ptrdiff_t bstride, vuint64m1_t value,
   return vsse64_v_u64m1(base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse64_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -509,7 +467,6 @@ void test_vsse64_v_u64m2(uint64_t *base, ptrdiff_t bstride, vuint64m2_t value,
   return vsse64_v_u64m2(base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse64_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -521,7 +478,6 @@ void test_vsse64_v_u64m4(uint64_t *base, ptrdiff_t bstride, vuint64m4_t value,
   return vsse64_v_u64m4(base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse64_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -533,7 +489,6 @@ void test_vsse64_v_u64m8(uint64_t *base, ptrdiff_t bstride, vuint64m8_t value,
   return vsse64_v_u64m8(base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse32_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -545,7 +500,6 @@ void test_vsse32_v_f32mf2(float *base, ptrdiff_t bstride, vfloat32mf2_t value,
   return vsse32_v_f32mf2(base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse32_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -557,7 +511,6 @@ void test_vsse32_v_f32m1(float *base, ptrdiff_t bstride, vfloat32m1_t value,
   return vsse32_v_f32m1(base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse32_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -569,7 +522,6 @@ void test_vsse32_v_f32m2(float *base, ptrdiff_t bstride, vfloat32m2_t value,
   return vsse32_v_f32m2(base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse32_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -581,7 +533,6 @@ void test_vsse32_v_f32m4(float *base, ptrdiff_t bstride, vfloat32m4_t value,
   return vsse32_v_f32m4(base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse32_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
@@ -593,7 +544,6 @@ void test_vsse32_v_f32m8(float *base, ptrdiff_t bstride, vfloat32m8_t value,
   return vsse32_v_f32m8(base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse64_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -605,7 +555,6 @@ void test_vsse64_v_f64m1(double *base, ptrdiff_t bstride, vfloat64m1_t value,
   return vsse64_v_f64m1(base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse64_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -617,7 +566,6 @@ void test_vsse64_v_f64m2(double *base, ptrdiff_t bstride, vfloat64m2_t value,
   return vsse64_v_f64m2(base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse64_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -629,7 +577,6 @@ void test_vsse64_v_f64m4(double *base, ptrdiff_t bstride, vfloat64m4_t value,
   return vsse64_v_f64m4(base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse64_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
@@ -641,7 +588,6 @@ void test_vsse64_v_f64m8(double *base, ptrdiff_t bstride, vfloat64m8_t value,
   return vsse64_v_f64m8(base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse8_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -653,7 +599,6 @@ void test_vsse8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride,
   return vsse8_v_i8mf8_m(mask, base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse8_v_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -665,7 +610,6 @@ void test_vsse8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride,
   return vsse8_v_i8mf4_m(mask, base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse8_v_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -677,7 +621,6 @@ void test_vsse8_v_i8mf2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride,
   return vsse8_v_i8mf2_m(mask, base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse8_v_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -689,7 +632,6 @@ void test_vsse8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride,
   return vsse8_v_i8m1_m(mask, base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse8_v_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -701,7 +643,6 @@ void test_vsse8_v_i8m2_m(vbool4_t mask, int8_t *base, ptrdiff_t bstride,
   return vsse8_v_i8m2_m(mask, base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse8_v_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
@@ -713,7 +654,6 @@ void test_vsse8_v_i8m4_m(vbool2_t mask, int8_t *base, ptrdiff_t bstride,
   return vsse8_v_i8m4_m(mask, base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse8_v_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
@@ -725,7 +665,6 @@ void test_vsse8_v_i8m8_m(vbool1_t mask, int8_t *base, ptrdiff_t bstride,
   return vsse8_v_i8m8_m(mask, base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse16_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -737,7 +676,6 @@ void test_vsse16_v_i16mf4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride,
   return vsse16_v_i16mf4_m(mask, base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse16_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -749,7 +687,6 @@ void test_vsse16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride,
   return vsse16_v_i16mf2_m(mask, base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse16_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -761,7 +698,6 @@ void test_vsse16_v_i16m1_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride,
   return vsse16_v_i16m1_m(mask, base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse16_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -773,7 +709,6 @@ void test_vsse16_v_i16m2_m(vbool8_t mask, int16_t *base, ptrdiff_t bstride,
   return vsse16_v_i16m2_m(mask, base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse16_v_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -785,7 +720,6 @@ void test_vsse16_v_i16m4_m(vbool4_t mask, int16_t *base, ptrdiff_t bstride,
   return vsse16_v_i16m4_m(mask, base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse16_v_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
@@ -797,7 +731,6 @@ void test_vsse16_v_i16m8_m(vbool2_t mask, int16_t *base, ptrdiff_t bstride,
   return vsse16_v_i16m8_m(mask, base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse32_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -809,7 +742,6 @@ void test_vsse32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride,
   return vsse32_v_i32mf2_m(mask, base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse32_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -821,7 +753,6 @@ void test_vsse32_v_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride,
   return vsse32_v_i32m1_m(mask, base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse32_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -833,7 +764,6 @@ void test_vsse32_v_i32m2_m(vbool16_t mask, int32_t *base, ptrdiff_t bstride,
   return vsse32_v_i32m2_m(mask, base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse32_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -845,7 +775,6 @@ void test_vsse32_v_i32m4_m(vbool8_t mask, int32_t *base, ptrdiff_t bstride,
   return vsse32_v_i32m4_m(mask, base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse32_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -857,7 +786,6 @@ void test_vsse32_v_i32m8_m(vbool4_t mask, int32_t *base, ptrdiff_t bstride,
   return vsse32_v_i32m8_m(mask, base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse64_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -869,7 +797,6 @@ void test_vsse64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride,
   return vsse64_v_i64m1_m(mask, base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse64_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -881,7 +808,6 @@ void test_vsse64_v_i64m2_m(vbool32_t mask, int64_t *base, ptrdiff_t bstride,
   return vsse64_v_i64m2_m(mask, base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse64_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -893,7 +819,6 @@ void test_vsse64_v_i64m4_m(vbool16_t mask, int64_t *base, ptrdiff_t bstride,
   return vsse64_v_i64m4_m(mask, base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse64_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -905,7 +830,6 @@ void test_vsse64_v_i64m8_m(vbool8_t mask, int64_t *base, ptrdiff_t bstride,
   return vsse64_v_i64m8_m(mask, base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse8_v_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -917,7 +841,6 @@ void test_vsse8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride,
   return vsse8_v_u8mf8_m(mask, base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse8_v_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -929,7 +852,6 @@ void test_vsse8_v_u8mf4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride,
   return vsse8_v_u8mf4_m(mask, base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse8_v_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -941,7 +863,6 @@ void test_vsse8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride,
   return vsse8_v_u8mf2_m(mask, base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse8_v_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -953,7 +874,6 @@ void test_vsse8_v_u8m1_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride,
   return vsse8_v_u8m1_m(mask, base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse8_v_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -965,7 +885,6 @@ void test_vsse8_v_u8m2_m(vbool4_t mask, uint8_t *base, ptrdiff_t bstride,
   return vsse8_v_u8m2_m(mask, base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse8_v_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
@@ -977,7 +896,6 @@ void test_vsse8_v_u8m4_m(vbool2_t mask, uint8_t *base, ptrdiff_t bstride,
   return vsse8_v_u8m4_m(mask, base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse8_v_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
@@ -989,7 +907,6 @@ void test_vsse8_v_u8m8_m(vbool1_t mask, uint8_t *base, ptrdiff_t bstride,
   return vsse8_v_u8m8_m(mask, base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse16_v_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -1001,7 +918,6 @@ void test_vsse16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride,
   return vsse16_v_u16mf4_m(mask, base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse16_v_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -1013,7 +929,6 @@ void test_vsse16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride,
   return vsse16_v_u16mf2_m(mask, base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse16_v_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -1025,7 +940,6 @@ void test_vsse16_v_u16m1_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride,
   return vsse16_v_u16m1_m(mask, base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse16_v_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -1037,7 +951,6 @@ void test_vsse16_v_u16m2_m(vbool8_t mask, uint16_t *base, ptrdiff_t bstride,
   return vsse16_v_u16m2_m(mask, base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse16_v_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -1049,7 +962,6 @@ void test_vsse16_v_u16m4_m(vbool4_t mask, uint16_t *base, ptrdiff_t bstride,
   return vsse16_v_u16m4_m(mask, base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse16_v_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
@@ -1061,7 +973,6 @@ void test_vsse16_v_u16m8_m(vbool2_t mask, uint16_t *base, ptrdiff_t bstride,
   return vsse16_v_u16m8_m(mask, base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse32_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1073,7 +984,6 @@ void test_vsse32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride,
   return vsse32_v_u32mf2_m(mask, base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse32_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1085,7 +995,6 @@ void test_vsse32_v_u32m1_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride,
   return vsse32_v_u32m1_m(mask, base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse32_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1097,7 +1006,6 @@ void test_vsse32_v_u32m2_m(vbool16_t mask, uint32_t *base, ptrdiff_t bstride,
   return vsse32_v_u32m2_m(mask, base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse32_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1109,7 +1017,6 @@ void test_vsse32_v_u32m4_m(vbool8_t mask, uint32_t *base, ptrdiff_t bstride,
   return vsse32_v_u32m4_m(mask, base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse32_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1121,7 +1028,6 @@ void test_vsse32_v_u32m8_m(vbool4_t mask, uint32_t *base, ptrdiff_t bstride,
   return vsse32_v_u32m8_m(mask, base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse64_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1133,7 +1039,6 @@ void test_vsse64_v_u64m1_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride,
   return vsse64_v_u64m1_m(mask, base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse64_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1145,7 +1050,6 @@ void test_vsse64_v_u64m2_m(vbool32_t mask, uint64_t *base, ptrdiff_t bstride,
   return vsse64_v_u64m2_m(mask, base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse64_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1157,7 +1061,6 @@ void test_vsse64_v_u64m4_m(vbool16_t mask, uint64_t *base, ptrdiff_t bstride,
   return vsse64_v_u64m4_m(mask, base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse64_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1169,7 +1072,6 @@ void test_vsse64_v_u64m8_m(vbool8_t mask, uint64_t *base, ptrdiff_t bstride,
   return vsse64_v_u64m8_m(mask, base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse32_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -1181,7 +1083,6 @@ void test_vsse32_v_f32mf2_m(vbool64_t mask, float *base, ptrdiff_t bstride,
   return vsse32_v_f32mf2_m(mask, base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse32_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -1193,7 +1094,6 @@ void test_vsse32_v_f32m1_m(vbool32_t mask, float *base, ptrdiff_t bstride,
   return vsse32_v_f32m1_m(mask, base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse32_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -1205,7 +1105,6 @@ void test_vsse32_v_f32m2_m(vbool16_t mask, float *base, ptrdiff_t bstride,
   return vsse32_v_f32m2_m(mask, base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse32_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -1217,7 +1116,6 @@ void test_vsse32_v_f32m4_m(vbool8_t mask, float *base, ptrdiff_t bstride,
   return vsse32_v_f32m4_m(mask, base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse32_v_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
@@ -1229,7 +1127,6 @@ void test_vsse32_v_f32m8_m(vbool4_t mask, float *base, ptrdiff_t bstride,
   return vsse32_v_f32m8_m(mask, base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse64_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -1241,7 +1138,6 @@ void test_vsse64_v_f64m1_m(vbool64_t mask, double *base, ptrdiff_t bstride,
   return vsse64_v_f64m1_m(mask, base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse64_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -1253,7 +1149,6 @@ void test_vsse64_v_f64m2_m(vbool32_t mask, double *base, ptrdiff_t bstride,
   return vsse64_v_f64m2_m(mask, base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse64_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -1265,7 +1160,6 @@ void test_vsse64_v_f64m4_m(vbool16_t mask, double *base, ptrdiff_t bstride,
   return vsse64_v_f64m4_m(mask, base, bstride, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsse64_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vssra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssra.c
index 4f65cef341a12..5748a793e53ec 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vssra.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssra.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint8mf8_t test_vssra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
   return vssra_vv_i8mf8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.i64.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint8mf8_t test_vssra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) {
   return vssra_vx_i8mf8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint8mf4_t test_vssra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
   return vssra_vv_i8mf4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.i64.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint8mf4_t test_vssra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
   return vssra_vx_i8mf4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint8mf2_t test_vssra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
   return vssra_vv_i8mf2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.i64.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint8mf2_t test_vssra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) {
   return vssra_vx_i8mf2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vint8m1_t test_vssra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) {
   return vssra_vv_i8m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.i64.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vint8m1_t test_vssra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) {
   return vssra_vx_i8m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vint8m2_t test_vssra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) {
   return vssra_vv_i8m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.i64.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vint8m2_t test_vssra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) {
   return vssra_vx_i8m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vint8m4_t test_vssra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) {
   return vssra_vv_i8m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.i64.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vint8m4_t test_vssra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) {
   return vssra_vx_i8m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vint8m8_t test_vssra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) {
   return vssra_vv_i8m8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.i64.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vint8m8_t test_vssra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) {
   return vssra_vx_i8m8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -155,7 +140,6 @@ vint16mf4_t test_vssra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift,
   return vssra_vv_i16mf4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -165,7 +149,6 @@ vint16mf4_t test_vssra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) {
   return vssra_vx_i16mf4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -176,7 +159,6 @@ vint16mf2_t test_vssra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift,
   return vssra_vv_i16mf2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -186,7 +168,6 @@ vint16mf2_t test_vssra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) {
   return vssra_vx_i16mf2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -196,7 +177,6 @@ vint16m1_t test_vssra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) {
   return vssra_vv_i16m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -206,7 +186,6 @@ vint16m1_t test_vssra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) {
   return vssra_vx_i16m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -216,7 +195,6 @@ vint16m2_t test_vssra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) {
   return vssra_vv_i16m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -226,7 +204,6 @@ vint16m2_t test_vssra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) {
   return vssra_vx_i16m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -236,7 +213,6 @@ vint16m4_t test_vssra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) {
   return vssra_vv_i16m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -246,7 +222,6 @@ vint16m4_t test_vssra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) {
   return vssra_vx_i16m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -256,7 +231,6 @@ vint16m8_t test_vssra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) {
   return vssra_vv_i16m8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -266,7 +240,6 @@ vint16m8_t test_vssra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) {
   return vssra_vx_i16m8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -277,7 +250,6 @@ vint32mf2_t test_vssra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift,
   return vssra_vv_i32mf2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -287,7 +259,6 @@ vint32mf2_t test_vssra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) {
   return vssra_vx_i32mf2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -297,7 +268,6 @@ vint32m1_t test_vssra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) {
   return vssra_vv_i32m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -307,7 +277,6 @@ vint32m1_t test_vssra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) {
   return vssra_vx_i32m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -317,7 +286,6 @@ vint32m2_t test_vssra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) {
   return vssra_vv_i32m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -327,7 +295,6 @@ vint32m2_t test_vssra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) {
   return vssra_vx_i32m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -337,7 +304,6 @@ vint32m4_t test_vssra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) {
   return vssra_vv_i32m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -347,7 +313,6 @@ vint32m4_t test_vssra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) {
   return vssra_vx_i32m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -357,7 +322,6 @@ vint32m8_t test_vssra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) {
   return vssra_vv_i32m8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -367,7 +331,6 @@ vint32m8_t test_vssra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) {
   return vssra_vx_i32m8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -377,7 +340,6 @@ vint64m1_t test_vssra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) {
   return vssra_vv_i64m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -387,7 +349,6 @@ vint64m1_t test_vssra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) {
   return vssra_vx_i64m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -397,7 +358,6 @@ vint64m2_t test_vssra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) {
   return vssra_vv_i64m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -407,7 +367,6 @@ vint64m2_t test_vssra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) {
   return vssra_vx_i64m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -417,7 +376,6 @@ vint64m4_t test_vssra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
   return vssra_vv_i64m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -427,7 +385,6 @@ vint64m4_t test_vssra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
   return vssra_vx_i64m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -437,7 +394,6 @@ vint64m8_t test_vssra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
   return vssra_vv_i64m8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -447,7 +403,6 @@ vint64m8_t test_vssra_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) {
   return vssra_vx_i64m8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -458,7 +413,6 @@ vint8mf8_t test_vssra_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
   return vssra_vv_i8mf8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -469,7 +423,6 @@ vint8mf8_t test_vssra_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
   return vssra_vx_i8mf8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -480,7 +433,6 @@ vint8mf4_t test_vssra_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
   return vssra_vv_i8mf4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -491,7 +443,6 @@ vint8mf4_t test_vssra_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
   return vssra_vx_i8mf4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -502,7 +453,6 @@ vint8mf2_t test_vssra_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
   return vssra_vv_i8mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -513,7 +463,6 @@ vint8mf2_t test_vssra_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
   return vssra_vx_i8mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -524,7 +473,6 @@ vint8m1_t test_vssra_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
   return vssra_vv_i8m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -535,7 +483,6 @@ vint8m1_t test_vssra_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
   return vssra_vx_i8m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -546,7 +493,6 @@ vint8m2_t test_vssra_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
   return vssra_vv_i8m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -557,7 +503,6 @@ vint8m2_t test_vssra_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
   return vssra_vx_i8m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -568,7 +513,6 @@ vint8m4_t test_vssra_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
   return vssra_vv_i8m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -579,7 +523,6 @@ vint8m4_t test_vssra_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
   return vssra_vx_i8m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -590,7 +533,6 @@ vint8m8_t test_vssra_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
   return vssra_vv_i8m8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -601,7 +543,6 @@ vint8m8_t test_vssra_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
   return vssra_vx_i8m8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -613,7 +554,6 @@ vint16mf4_t test_vssra_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
   return vssra_vv_i16mf4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -624,7 +564,6 @@ vint16mf4_t test_vssra_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
   return vssra_vx_i16mf4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -636,7 +575,6 @@ vint16mf2_t test_vssra_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
   return vssra_vv_i16mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -647,7 +585,6 @@ vint16mf2_t test_vssra_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
   return vssra_vx_i16mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -658,7 +595,6 @@ vint16m1_t test_vssra_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
   return vssra_vv_i16m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -669,7 +605,6 @@ vint16m1_t test_vssra_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
   return vssra_vx_i16m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -680,7 +615,6 @@ vint16m2_t test_vssra_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
   return vssra_vv_i16m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -691,7 +625,6 @@ vint16m2_t test_vssra_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
   return vssra_vx_i16m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -702,7 +635,6 @@ vint16m4_t test_vssra_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
   return vssra_vv_i16m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -713,7 +645,6 @@ vint16m4_t test_vssra_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
   return vssra_vx_i16m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -724,7 +655,6 @@ vint16m8_t test_vssra_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
   return vssra_vv_i16m8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -735,7 +665,6 @@ vint16m8_t test_vssra_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
   return vssra_vx_i16m8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -747,7 +676,6 @@ vint32mf2_t test_vssra_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
   return vssra_vv_i32mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -758,7 +686,6 @@ vint32mf2_t test_vssra_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
   return vssra_vx_i32mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -769,7 +696,6 @@ vint32m1_t test_vssra_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
   return vssra_vv_i32m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -780,7 +706,6 @@ vint32m1_t test_vssra_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
   return vssra_vx_i32m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -791,7 +716,6 @@ vint32m2_t test_vssra_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
   return vssra_vv_i32m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -802,7 +726,6 @@ vint32m2_t test_vssra_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
   return vssra_vx_i32m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -813,7 +736,6 @@ vint32m4_t test_vssra_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
   return vssra_vv_i32m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -824,7 +746,6 @@ vint32m4_t test_vssra_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
   return vssra_vx_i32m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -835,7 +756,6 @@ vint32m8_t test_vssra_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
   return vssra_vv_i32m8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -846,7 +766,6 @@ vint32m8_t test_vssra_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
   return vssra_vx_i32m8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -857,7 +776,6 @@ vint64m1_t test_vssra_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
   return vssra_vv_i64m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -868,7 +786,6 @@ vint64m1_t test_vssra_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
   return vssra_vx_i64m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -879,7 +796,6 @@ vint64m2_t test_vssra_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
   return vssra_vv_i64m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -890,7 +806,6 @@ vint64m2_t test_vssra_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
   return vssra_vx_i64m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -901,7 +816,6 @@ vint64m4_t test_vssra_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
   return vssra_vv_i64m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -912,7 +826,6 @@ vint64m4_t test_vssra_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
   return vssra_vx_i64m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -923,7 +836,6 @@ vint64m8_t test_vssra_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
   return vssra_vv_i64m8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vssrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssrl.c
index 7e0691ba89ece..dfda5f9512581 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vssrl.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssrl.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vuint8mf8_t test_vssrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
   return vssrl_vv_u8mf8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.i64.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vuint8mf8_t test_vssrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
   return vssrl_vx_u8mf8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vuint8mf4_t test_vssrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
   return vssrl_vv_u8mf4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.i64.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vuint8mf4_t test_vssrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
   return vssrl_vx_u8mf4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vuint8mf2_t test_vssrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
   return vssrl_vv_u8mf2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.i64.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vuint8mf2_t test_vssrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
   return vssrl_vx_u8mf2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vuint8m1_t test_vssrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
   return vssrl_vv_u8m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.i64.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vuint8m1_t test_vssrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
   return vssrl_vx_u8m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vuint8m2_t test_vssrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
   return vssrl_vv_u8m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.i64.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vuint8m2_t test_vssrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) {
   return vssrl_vx_u8m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vuint8m4_t test_vssrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
   return vssrl_vv_u8m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.i64.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vuint8m4_t test_vssrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) {
   return vssrl_vx_u8m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vuint8m8_t test_vssrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
   return vssrl_vv_u8m8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.i64.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vuint8m8_t test_vssrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) {
   return vssrl_vx_u8m8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -155,7 +140,6 @@ vuint16mf4_t test_vssrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift,
   return vssrl_vv_u16mf4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -165,7 +149,6 @@ vuint16mf4_t test_vssrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) {
   return vssrl_vx_u16mf4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -176,7 +159,6 @@ vuint16mf2_t test_vssrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift,
   return vssrl_vv_u16mf2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -186,7 +168,6 @@ vuint16mf2_t test_vssrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) {
   return vssrl_vx_u16mf2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -196,7 +177,6 @@ vuint16m1_t test_vssrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
   return vssrl_vv_u16m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -206,7 +186,6 @@ vuint16m1_t test_vssrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) {
   return vssrl_vx_u16m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -216,7 +195,6 @@ vuint16m2_t test_vssrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
   return vssrl_vv_u16m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -226,7 +204,6 @@ vuint16m2_t test_vssrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) {
   return vssrl_vx_u16m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -236,7 +213,6 @@ vuint16m4_t test_vssrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
   return vssrl_vv_u16m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -246,7 +222,6 @@ vuint16m4_t test_vssrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) {
   return vssrl_vx_u16m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -256,7 +231,6 @@ vuint16m8_t test_vssrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
   return vssrl_vv_u16m8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -266,7 +240,6 @@ vuint16m8_t test_vssrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) {
   return vssrl_vx_u16m8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -277,7 +250,6 @@ vuint32mf2_t test_vssrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift,
   return vssrl_vv_u32mf2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -287,7 +259,6 @@ vuint32mf2_t test_vssrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) {
   return vssrl_vx_u32mf2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -297,7 +268,6 @@ vuint32m1_t test_vssrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
   return vssrl_vv_u32m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -307,7 +277,6 @@ vuint32m1_t test_vssrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
   return vssrl_vx_u32m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -317,7 +286,6 @@ vuint32m2_t test_vssrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
   return vssrl_vv_u32m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -327,7 +295,6 @@ vuint32m2_t test_vssrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) {
   return vssrl_vx_u32m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -337,7 +304,6 @@ vuint32m4_t test_vssrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
   return vssrl_vv_u32m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -347,7 +313,6 @@ vuint32m4_t test_vssrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) {
   return vssrl_vx_u32m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -357,7 +322,6 @@ vuint32m8_t test_vssrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
   return vssrl_vv_u32m8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -367,7 +331,6 @@ vuint32m8_t test_vssrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) {
   return vssrl_vx_u32m8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -377,7 +340,6 @@ vuint64m1_t test_vssrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
   return vssrl_vv_u64m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -387,7 +349,6 @@ vuint64m1_t test_vssrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
   return vssrl_vx_u64m1(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -397,7 +358,6 @@ vuint64m2_t test_vssrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
   return vssrl_vv_u64m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -407,7 +367,6 @@ vuint64m2_t test_vssrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
   return vssrl_vx_u64m2(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -417,7 +376,6 @@ vuint64m4_t test_vssrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
   return vssrl_vv_u64m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -427,7 +385,6 @@ vuint64m4_t test_vssrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
   return vssrl_vx_u64m4(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -437,7 +394,6 @@ vuint64m8_t test_vssrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
   return vssrl_vv_u64m8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -447,7 +403,6 @@ vuint64m8_t test_vssrl_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) {
   return vssrl_vx_u64m8(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -459,7 +414,6 @@ vuint8mf8_t test_vssrl_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
   return vssrl_vv_u8mf8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -470,7 +424,6 @@ vuint8mf8_t test_vssrl_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
   return vssrl_vx_u8mf8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -482,7 +435,6 @@ vuint8mf4_t test_vssrl_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
   return vssrl_vv_u8mf4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -493,7 +445,6 @@ vuint8mf4_t test_vssrl_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
   return vssrl_vx_u8mf4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -505,7 +456,6 @@ vuint8mf2_t test_vssrl_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
   return vssrl_vv_u8mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -516,7 +466,6 @@ vuint8mf2_t test_vssrl_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
   return vssrl_vx_u8mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -527,7 +476,6 @@ vuint8m1_t test_vssrl_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
   return vssrl_vv_u8m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -538,7 +486,6 @@ vuint8m1_t test_vssrl_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
   return vssrl_vx_u8m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -549,7 +496,6 @@ vuint8m2_t test_vssrl_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
   return vssrl_vv_u8m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -560,7 +506,6 @@ vuint8m2_t test_vssrl_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
   return vssrl_vx_u8m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -571,7 +516,6 @@ vuint8m4_t test_vssrl_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
   return vssrl_vv_u8m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -582,7 +526,6 @@ vuint8m4_t test_vssrl_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
   return vssrl_vx_u8m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -593,7 +536,6 @@ vuint8m8_t test_vssrl_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
   return vssrl_vv_u8m8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -604,7 +546,6 @@ vuint8m8_t test_vssrl_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
   return vssrl_vx_u8m8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -616,7 +557,6 @@ vuint16mf4_t test_vssrl_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
   return vssrl_vv_u16mf4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -627,7 +567,6 @@ vuint16mf4_t test_vssrl_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
   return vssrl_vx_u16mf4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -639,7 +578,6 @@ vuint16mf2_t test_vssrl_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
   return vssrl_vv_u16mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -650,7 +588,6 @@ vuint16mf2_t test_vssrl_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
   return vssrl_vx_u16mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -662,7 +599,6 @@ vuint16m1_t test_vssrl_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
   return vssrl_vv_u16m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -673,7 +609,6 @@ vuint16m1_t test_vssrl_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
   return vssrl_vx_u16m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -685,7 +620,6 @@ vuint16m2_t test_vssrl_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
   return vssrl_vv_u16m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -696,7 +630,6 @@ vuint16m2_t test_vssrl_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
   return vssrl_vx_u16m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -708,7 +641,6 @@ vuint16m4_t test_vssrl_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
   return vssrl_vv_u16m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -719,7 +651,6 @@ vuint16m4_t test_vssrl_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
   return vssrl_vx_u16m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -731,7 +662,6 @@ vuint16m8_t test_vssrl_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
   return vssrl_vv_u16m8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -742,7 +672,6 @@ vuint16m8_t test_vssrl_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
   return vssrl_vx_u16m8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -754,7 +683,6 @@ vuint32mf2_t test_vssrl_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
   return vssrl_vv_u32mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -765,7 +693,6 @@ vuint32mf2_t test_vssrl_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
   return vssrl_vx_u32mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -777,7 +704,6 @@ vuint32m1_t test_vssrl_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
   return vssrl_vv_u32m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -788,7 +714,6 @@ vuint32m1_t test_vssrl_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
   return vssrl_vx_u32m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -800,7 +725,6 @@ vuint32m2_t test_vssrl_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
   return vssrl_vv_u32m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -811,7 +735,6 @@ vuint32m2_t test_vssrl_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
   return vssrl_vx_u32m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -823,7 +746,6 @@ vuint32m4_t test_vssrl_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
   return vssrl_vv_u32m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -834,7 +756,6 @@ vuint32m4_t test_vssrl_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
   return vssrl_vx_u32m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -846,7 +767,6 @@ vuint32m8_t test_vssrl_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
   return vssrl_vv_u32m8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -857,7 +777,6 @@ vuint32m8_t test_vssrl_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
   return vssrl_vx_u32m8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -869,7 +788,6 @@ vuint64m1_t test_vssrl_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
   return vssrl_vv_u64m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -880,7 +798,6 @@ vuint64m1_t test_vssrl_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
   return vssrl_vx_u64m1_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -892,7 +809,6 @@ vuint64m2_t test_vssrl_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
   return vssrl_vv_u64m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -903,7 +819,6 @@ vuint64m2_t test_vssrl_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
   return vssrl_vx_u64m2_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -915,7 +830,6 @@ vuint64m4_t test_vssrl_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
   return vssrl_vv_u64m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -926,7 +840,6 @@ vuint64m4_t test_vssrl_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
   return vssrl_vx_u64m4_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -938,7 +851,6 @@ vuint64m8_t test_vssrl_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
   return vssrl_vv_u64m8_m(mask, maskedoff, op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vssub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssub.c
index cf29ac0a719fa..d0cfc7f5e1eca 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vssub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssub.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint8mf8_t test_vssub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
   return vssub_vv_i8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint8mf8_t test_vssub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
   return vssub_vx_i8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint8mf4_t test_vssub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
   return vssub_vv_i8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssub.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint8mf4_t test_vssub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
   return vssub_vx_i8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint8mf2_t test_vssub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
   return vssub_vv_i8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssub.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint8mf2_t test_vssub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
   return vssub_vx_i8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vint8m1_t test_vssub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
   return vssub_vv_i8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssub.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vint8m1_t test_vssub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
   return vssub_vx_i8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vint8m2_t test_vssub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
   return vssub_vv_i8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssub.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vint8m2_t test_vssub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
   return vssub_vx_i8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vint8m4_t test_vssub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
   return vssub_vv_i8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssub.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vint8m4_t test_vssub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
   return vssub_vx_i8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vint8m8_t test_vssub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
   return vssub_vv_i8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssub.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vint8m8_t test_vssub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
   return vssub_vx_i8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vint16mf4_t test_vssub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
   return vssub_vv_i16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssub.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vint16mf4_t test_vssub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
   return vssub_vx_i16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vint16mf2_t test_vssub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
   return vssub_vv_i16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssub.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vint16mf2_t test_vssub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
   return vssub_vx_i16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vint16m1_t test_vssub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
   return vssub_vv_i16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssub.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vint16m1_t test_vssub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
   return vssub_vx_i16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vint16m2_t test_vssub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
   return vssub_vv_i16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssub.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vint16m2_t test_vssub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
   return vssub_vx_i16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vint16m4_t test_vssub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
   return vssub_vv_i16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssub.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vint16m4_t test_vssub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
   return vssub_vx_i16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vint16m8_t test_vssub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
   return vssub_vv_i16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssub.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vint16m8_t test_vssub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
   return vssub_vx_i16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vint32mf2_t test_vssub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
   return vssub_vv_i32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssub.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vint32mf2_t test_vssub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
   return vssub_vx_i32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vint32m1_t test_vssub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
   return vssub_vv_i32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssub.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -304,7 +274,6 @@ vint32m1_t test_vssub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
   return vssub_vx_i32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -314,7 +283,6 @@ vint32m2_t test_vssub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
   return vssub_vv_i32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssub.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -324,7 +292,6 @@ vint32m2_t test_vssub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
   return vssub_vx_i32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -334,7 +301,6 @@ vint32m4_t test_vssub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
   return vssub_vv_i32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssub.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -344,7 +310,6 @@ vint32m4_t test_vssub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
   return vssub_vx_i32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -354,7 +319,6 @@ vint32m8_t test_vssub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
   return vssub_vv_i32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssub.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -364,7 +328,6 @@ vint32m8_t test_vssub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
   return vssub_vx_i32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -374,7 +337,6 @@ vint64m1_t test_vssub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
   return vssub_vv_i64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssub.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -384,7 +346,6 @@ vint64m1_t test_vssub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
   return vssub_vx_i64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -394,7 +355,6 @@ vint64m2_t test_vssub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
   return vssub_vv_i64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssub.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -404,7 +364,6 @@ vint64m2_t test_vssub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
   return vssub_vx_i64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -414,7 +373,6 @@ vint64m4_t test_vssub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
   return vssub_vv_i64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssub.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -424,7 +382,6 @@ vint64m4_t test_vssub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
   return vssub_vx_i64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -434,7 +391,6 @@ vint64m8_t test_vssub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
   return vssub_vv_i64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssub.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -444,7 +400,6 @@ vint64m8_t test_vssub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
   return vssub_vx_i64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -454,7 +409,6 @@ vuint8mf8_t test_vssubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
   return vssubu_vv_u8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -464,7 +418,6 @@ vuint8mf8_t test_vssubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
   return vssubu_vx_u8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssubu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -474,7 +427,6 @@ vuint8mf4_t test_vssubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
   return vssubu_vv_u8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssubu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -484,7 +436,6 @@ vuint8mf4_t test_vssubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
   return vssubu_vx_u8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssubu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -494,7 +445,6 @@ vuint8mf2_t test_vssubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
   return vssubu_vv_u8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssubu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -504,7 +454,6 @@ vuint8mf2_t test_vssubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
   return vssubu_vx_u8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssubu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -514,7 +463,6 @@ vuint8m1_t test_vssubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vssubu_vv_u8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssubu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -524,7 +472,6 @@ vuint8m1_t test_vssubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
   return vssubu_vx_u8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssubu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -534,7 +481,6 @@ vuint8m2_t test_vssubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vssubu_vv_u8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssubu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -544,7 +490,6 @@ vuint8m2_t test_vssubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
   return vssubu_vx_u8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssubu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -554,7 +499,6 @@ vuint8m4_t test_vssubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vssubu_vv_u8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssubu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -564,7 +508,6 @@ vuint8m4_t test_vssubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
   return vssubu_vx_u8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssubu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -574,7 +517,6 @@ vuint8m8_t test_vssubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
   return vssubu_vv_u8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssubu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -584,7 +526,6 @@ vuint8m8_t test_vssubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
   return vssubu_vx_u8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssubu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -595,7 +536,6 @@ vuint16mf4_t test_vssubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
   return vssubu_vv_u16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssubu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -605,7 +545,6 @@ vuint16mf4_t test_vssubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
   return vssubu_vx_u16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssubu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -616,7 +555,6 @@ vuint16mf2_t test_vssubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
   return vssubu_vv_u16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssubu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -626,7 +564,6 @@ vuint16mf2_t test_vssubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
   return vssubu_vx_u16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssubu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -636,7 +573,6 @@ vuint16m1_t test_vssubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
   return vssubu_vv_u16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssubu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -646,7 +582,6 @@ vuint16m1_t test_vssubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
   return vssubu_vx_u16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssubu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -656,7 +591,6 @@ vuint16m2_t test_vssubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vssubu_vv_u16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssubu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -666,7 +600,6 @@ vuint16m2_t test_vssubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
   return vssubu_vx_u16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssubu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -676,7 +609,6 @@ vuint16m4_t test_vssubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vssubu_vv_u16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssubu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -686,7 +618,6 @@ vuint16m4_t test_vssubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
   return vssubu_vx_u16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssubu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -696,7 +627,6 @@ vuint16m8_t test_vssubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return vssubu_vv_u16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssubu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -706,7 +636,6 @@ vuint16m8_t test_vssubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
   return vssubu_vx_u16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -717,7 +646,6 @@ vuint32mf2_t test_vssubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
   return vssubu_vv_u32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -727,7 +655,6 @@ vuint32mf2_t test_vssubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
   return vssubu_vx_u32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssubu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -737,7 +664,6 @@ vuint32m1_t test_vssubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
   return vssubu_vv_u32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssubu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -747,7 +673,6 @@ vuint32m1_t test_vssubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
   return vssubu_vx_u32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssubu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -757,7 +682,6 @@ vuint32m2_t test_vssubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
   return vssubu_vv_u32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssubu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -767,7 +691,6 @@ vuint32m2_t test_vssubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vssubu_vx_u32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssubu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -777,7 +700,6 @@ vuint32m4_t test_vssubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vssubu_vv_u32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssubu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -787,7 +709,6 @@ vuint32m4_t test_vssubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vssubu_vx_u32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssubu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -797,7 +718,6 @@ vuint32m8_t test_vssubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
   return vssubu_vv_u32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssubu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -807,7 +727,6 @@ vuint32m8_t test_vssubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
   return vssubu_vx_u32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -817,7 +736,6 @@ vuint64m1_t test_vssubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
   return vssubu_vv_u64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -827,7 +745,6 @@ vuint64m1_t test_vssubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
   return vssubu_vx_u64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssubu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -837,7 +754,6 @@ vuint64m2_t test_vssubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
   return vssubu_vv_u64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssubu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -847,7 +763,6 @@ vuint64m2_t test_vssubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
   return vssubu_vx_u64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssubu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -857,7 +772,6 @@ vuint64m4_t test_vssubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
   return vssubu_vv_u64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssubu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -867,7 +781,6 @@ vuint64m4_t test_vssubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
   return vssubu_vx_u64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssubu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -877,7 +790,6 @@ vuint64m8_t test_vssubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
   return vssubu_vv_u64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssubu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -887,7 +799,6 @@ vuint64m8_t test_vssubu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
   return vssubu_vx_u64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssub.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -898,7 +809,6 @@ vint8mf8_t test_vssub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
   return vssub_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssub.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -909,7 +819,6 @@ vint8mf8_t test_vssub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
   return vssub_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssub.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -920,7 +829,6 @@ vint8mf4_t test_vssub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
   return vssub_vv_i8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssub.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -931,7 +839,6 @@ vint8mf4_t test_vssub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
   return vssub_vx_i8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssub.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -942,7 +849,6 @@ vint8mf2_t test_vssub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
   return vssub_vv_i8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssub.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -953,7 +859,6 @@ vint8mf2_t test_vssub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
   return vssub_vx_i8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssub.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -964,7 +869,6 @@ vint8m1_t test_vssub_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
   return vssub_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssub.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -975,7 +879,6 @@ vint8m1_t test_vssub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
   return vssub_vx_i8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssub.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -986,7 +889,6 @@ vint8m2_t test_vssub_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
   return vssub_vv_i8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssub.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -997,7 +899,6 @@ vint8m2_t test_vssub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
   return vssub_vx_i8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssub.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1008,7 +909,6 @@ vint8m4_t test_vssub_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
   return vssub_vv_i8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssub.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1019,7 +919,6 @@ vint8m4_t test_vssub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
   return vssub_vx_i8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssub.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1030,7 +929,6 @@ vint8m8_t test_vssub_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
   return vssub_vv_i8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssub.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1041,7 +939,6 @@ vint8m8_t test_vssub_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
   return vssub_vx_i8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssub.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1053,7 +950,6 @@ vint16mf4_t test_vssub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
   return vssub_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssub.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1064,7 +960,6 @@ vint16mf4_t test_vssub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
   return vssub_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssub.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1076,7 +971,6 @@ vint16mf2_t test_vssub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
   return vssub_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssub.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1087,7 +981,6 @@ vint16mf2_t test_vssub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
   return vssub_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssub.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1098,7 +991,6 @@ vint16m1_t test_vssub_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
   return vssub_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssub.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1109,7 +1001,6 @@ vint16m1_t test_vssub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
   return vssub_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssub.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1120,7 +1011,6 @@ vint16m2_t test_vssub_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
   return vssub_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssub.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1131,7 +1021,6 @@ vint16m2_t test_vssub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
   return vssub_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssub.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1142,7 +1031,6 @@ vint16m4_t test_vssub_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
   return vssub_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssub.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1153,7 +1041,6 @@ vint16m4_t test_vssub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
   return vssub_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssub.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1164,7 +1051,6 @@ vint16m8_t test_vssub_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
   return vssub_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssub.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1175,7 +1061,6 @@ vint16m8_t test_vssub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
   return vssub_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssub.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1187,7 +1072,6 @@ vint32mf2_t test_vssub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
   return vssub_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1198,7 +1082,6 @@ vint32mf2_t test_vssub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
   return vssub_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssub.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1209,7 +1092,6 @@ vint32m1_t test_vssub_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
   return vssub_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssub.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1220,7 +1102,6 @@ vint32m1_t test_vssub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
   return vssub_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssub.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1231,7 +1112,6 @@ vint32m2_t test_vssub_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
   return vssub_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssub.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1242,7 +1122,6 @@ vint32m2_t test_vssub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
   return vssub_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssub.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1253,7 +1132,6 @@ vint32m4_t test_vssub_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
   return vssub_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssub.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1264,7 +1142,6 @@ vint32m4_t test_vssub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
   return vssub_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssub.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1275,7 +1152,6 @@ vint32m8_t test_vssub_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
   return vssub_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssub.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1286,7 +1162,6 @@ vint32m8_t test_vssub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
   return vssub_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssub.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1297,7 +1172,6 @@ vint64m1_t test_vssub_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
   return vssub_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssub.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1308,7 +1182,6 @@ vint64m1_t test_vssub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
   return vssub_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssub.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1319,7 +1192,6 @@ vint64m2_t test_vssub_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
   return vssub_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssub.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1330,7 +1202,6 @@ vint64m2_t test_vssub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
   return vssub_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssub.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1341,7 +1212,6 @@ vint64m4_t test_vssub_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
   return vssub_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssub.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1352,7 +1222,6 @@ vint64m4_t test_vssub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
   return vssub_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssub.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1363,7 +1232,6 @@ vint64m8_t test_vssub_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
   return vssub_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssub.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1374,7 +1242,6 @@ vint64m8_t test_vssub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
   return vssub_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1386,7 +1253,6 @@ vuint8mf8_t test_vssubu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
   return vssubu_vv_u8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssubu.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1397,7 +1263,6 @@ vuint8mf8_t test_vssubu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
   return vssubu_vx_u8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssubu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1409,7 +1274,6 @@ vuint8mf4_t test_vssubu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
   return vssubu_vv_u8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssubu.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1420,7 +1284,6 @@ vuint8mf4_t test_vssubu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
   return vssubu_vx_u8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssubu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1432,7 +1295,6 @@ vuint8mf2_t test_vssubu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
   return vssubu_vv_u8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssubu.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1443,7 +1305,6 @@ vuint8mf2_t test_vssubu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
   return vssubu_vx_u8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssubu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1454,7 +1315,6 @@ vuint8m1_t test_vssubu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
   return vssubu_vv_u8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssubu.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1465,7 +1325,6 @@ vuint8m1_t test_vssubu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
   return vssubu_vx_u8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssubu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1476,7 +1335,6 @@ vuint8m2_t test_vssubu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
   return vssubu_vv_u8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssubu.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1487,7 +1345,6 @@ vuint8m2_t test_vssubu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
   return vssubu_vx_u8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssubu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1498,7 +1355,6 @@ vuint8m4_t test_vssubu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
   return vssubu_vv_u8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssubu.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1509,7 +1365,6 @@ vuint8m4_t test_vssubu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
   return vssubu_vx_u8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1520,7 +1375,6 @@ vuint8m8_t test_vssubu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
   return vssubu_vv_u8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssubu.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1531,7 +1385,6 @@ vuint8m8_t test_vssubu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
   return vssubu_vx_u8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssubu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1543,7 +1396,6 @@ vuint16mf4_t test_vssubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
   return vssubu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssubu.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1555,7 +1407,6 @@ vuint16mf4_t test_vssubu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
   return vssubu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssubu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1567,7 +1418,6 @@ vuint16mf2_t test_vssubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
   return vssubu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssubu.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1579,7 +1429,6 @@ vuint16mf2_t test_vssubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
   return vssubu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssubu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1591,7 +1440,6 @@ vuint16m1_t test_vssubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
   return vssubu_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssubu.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1602,7 +1450,6 @@ vuint16m1_t test_vssubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
   return vssubu_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssubu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1614,7 +1461,6 @@ vuint16m2_t test_vssubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
   return vssubu_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssubu.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1625,7 +1471,6 @@ vuint16m2_t test_vssubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
   return vssubu_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1637,7 +1482,6 @@ vuint16m4_t test_vssubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
   return vssubu_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssubu.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1648,7 +1492,6 @@ vuint16m4_t test_vssubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
   return vssubu_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssubu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1660,7 +1503,6 @@ vuint16m8_t test_vssubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
   return vssubu_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssubu.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1671,7 +1513,6 @@ vuint16m8_t test_vssubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
   return vssubu_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1683,7 +1524,6 @@ vuint32mf2_t test_vssubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
   return vssubu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssubu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1695,7 +1535,6 @@ vuint32mf2_t test_vssubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
   return vssubu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssubu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1707,7 +1546,6 @@ vuint32m1_t test_vssubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
   return vssubu_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssubu.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1718,7 +1556,6 @@ vuint32m1_t test_vssubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
   return vssubu_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssubu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1730,7 +1567,6 @@ vuint32m2_t test_vssubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
   return vssubu_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssubu.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1741,7 +1577,6 @@ vuint32m2_t test_vssubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
   return vssubu_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssubu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1753,7 +1588,6 @@ vuint32m4_t test_vssubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
   return vssubu_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssubu.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1764,7 +1598,6 @@ vuint32m4_t test_vssubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
   return vssubu_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1776,7 +1609,6 @@ vuint32m8_t test_vssubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
   return vssubu_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssubu.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1787,7 +1619,6 @@ vuint32m8_t test_vssubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
   return vssubu_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssubu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1799,7 +1630,6 @@ vuint64m1_t test_vssubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
   return vssubu_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssubu.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1810,7 +1640,6 @@ vuint64m1_t test_vssubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
   return vssubu_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssubu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1822,7 +1651,6 @@ vuint64m2_t test_vssubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
   return vssubu_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssubu.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1833,7 +1661,6 @@ vuint64m2_t test_vssubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
   return vssubu_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssubu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1845,7 +1672,6 @@ vuint64m4_t test_vssubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
   return vssubu_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssubu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1856,7 +1682,6 @@ vuint64m4_t test_vssubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
   return vssubu_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssubu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1868,7 +1693,6 @@ vuint64m8_t test_vssubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
   return vssubu_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssubu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsub.c
index a733d5b8eeb23..599da0b67cef7 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsub.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint8mf8_t test_vsub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
   return vsub_vv_i8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint8mf8_t test_vsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
   return vsub_vx_i8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint8mf4_t test_vsub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
   return vsub_vv_i8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint8mf4_t test_vsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
   return vsub_vx_i8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint8mf2_t test_vsub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
   return vsub_vv_i8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint8mf2_t test_vsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
   return vsub_vx_i8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vint8m1_t test_vsub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
   return vsub_vv_i8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vint8m1_t test_vsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
   return vsub_vx_i8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vint8m2_t test_vsub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
   return vsub_vv_i8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vint8m2_t test_vsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
   return vsub_vx_i8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vint8m4_t test_vsub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
   return vsub_vv_i8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vint8m4_t test_vsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
   return vsub_vx_i8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vint8m8_t test_vsub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
   return vsub_vv_i8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vint8m8_t test_vsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
   return vsub_vx_i8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vint16mf4_t test_vsub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
   return vsub_vv_i16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vint16mf4_t test_vsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
   return vsub_vx_i16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vint16mf2_t test_vsub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
   return vsub_vv_i16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vint16mf2_t test_vsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
   return vsub_vx_i16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vint16m1_t test_vsub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
   return vsub_vv_i16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vint16m1_t test_vsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
   return vsub_vx_i16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vint16m2_t test_vsub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
   return vsub_vv_i16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vint16m2_t test_vsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
   return vsub_vx_i16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vint16m4_t test_vsub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
   return vsub_vv_i16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vint16m4_t test_vsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
   return vsub_vx_i16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vint16m8_t test_vsub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
   return vsub_vv_i16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vint16m8_t test_vsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
   return vsub_vx_i16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vint32mf2_t test_vsub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
   return vsub_vv_i32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vint32mf2_t test_vsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
   return vsub_vx_i32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vint32m1_t test_vsub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
   return vsub_vv_i32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -304,7 +274,6 @@ vint32m1_t test_vsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
   return vsub_vx_i32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -314,7 +283,6 @@ vint32m2_t test_vsub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
   return vsub_vv_i32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -324,7 +292,6 @@ vint32m2_t test_vsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
   return vsub_vx_i32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -334,7 +301,6 @@ vint32m4_t test_vsub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
   return vsub_vv_i32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -344,7 +310,6 @@ vint32m4_t test_vsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
   return vsub_vx_i32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -354,7 +319,6 @@ vint32m8_t test_vsub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
   return vsub_vv_i32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -364,7 +328,6 @@ vint32m8_t test_vsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
   return vsub_vx_i32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -374,7 +337,6 @@ vint64m1_t test_vsub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
   return vsub_vv_i64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -384,7 +346,6 @@ vint64m1_t test_vsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
   return vsub_vx_i64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -394,7 +355,6 @@ vint64m2_t test_vsub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
   return vsub_vv_i64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -404,7 +364,6 @@ vint64m2_t test_vsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
   return vsub_vx_i64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -414,7 +373,6 @@ vint64m4_t test_vsub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
   return vsub_vv_i64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -424,7 +382,6 @@ vint64m4_t test_vsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
   return vsub_vx_i64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -434,7 +391,6 @@ vint64m8_t test_vsub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
   return vsub_vv_i64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -444,7 +400,6 @@ vint64m8_t test_vsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
   return vsub_vx_i64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -454,7 +409,6 @@ vuint8mf8_t test_vsub_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
   return vsub_vv_u8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -464,7 +418,6 @@ vuint8mf8_t test_vsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
   return vsub_vx_u8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -474,7 +427,6 @@ vuint8mf4_t test_vsub_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
   return vsub_vv_u8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -484,7 +436,6 @@ vuint8mf4_t test_vsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
   return vsub_vx_u8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -494,7 +445,6 @@ vuint8mf2_t test_vsub_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
   return vsub_vv_u8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -504,7 +454,6 @@ vuint8mf2_t test_vsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
   return vsub_vx_u8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -514,7 +463,6 @@ vuint8m1_t test_vsub_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vsub_vv_u8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -524,7 +472,6 @@ vuint8m1_t test_vsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
   return vsub_vx_u8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -534,7 +481,6 @@ vuint8m2_t test_vsub_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vsub_vv_u8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -544,7 +490,6 @@ vuint8m2_t test_vsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
   return vsub_vx_u8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -554,7 +499,6 @@ vuint8m4_t test_vsub_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vsub_vv_u8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -564,7 +508,6 @@ vuint8m4_t test_vsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
   return vsub_vx_u8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -574,7 +517,6 @@ vuint8m8_t test_vsub_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
   return vsub_vv_u8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -584,7 +526,6 @@ vuint8m8_t test_vsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
   return vsub_vx_u8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -594,7 +535,6 @@ vuint16mf4_t test_vsub_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
   return vsub_vv_u16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -604,7 +544,6 @@ vuint16mf4_t test_vsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
   return vsub_vx_u16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -614,7 +553,6 @@ vuint16mf2_t test_vsub_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
   return vsub_vv_u16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -624,7 +562,6 @@ vuint16mf2_t test_vsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
   return vsub_vx_u16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -634,7 +571,6 @@ vuint16m1_t test_vsub_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
   return vsub_vv_u16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -644,7 +580,6 @@ vuint16m1_t test_vsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
   return vsub_vx_u16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -654,7 +589,6 @@ vuint16m2_t test_vsub_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vsub_vv_u16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -664,7 +598,6 @@ vuint16m2_t test_vsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
   return vsub_vx_u16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -674,7 +607,6 @@ vuint16m4_t test_vsub_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vsub_vv_u16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -684,7 +616,6 @@ vuint16m4_t test_vsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
   return vsub_vx_u16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -694,7 +625,6 @@ vuint16m8_t test_vsub_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return vsub_vv_u16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -704,7 +634,6 @@ vuint16m8_t test_vsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
   return vsub_vx_u16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -714,7 +643,6 @@ vuint32mf2_t test_vsub_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
   return vsub_vv_u32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -724,7 +652,6 @@ vuint32mf2_t test_vsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
   return vsub_vx_u32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -734,7 +661,6 @@ vuint32m1_t test_vsub_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
   return vsub_vv_u32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -744,7 +670,6 @@ vuint32m1_t test_vsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
   return vsub_vx_u32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -754,7 +679,6 @@ vuint32m2_t test_vsub_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
   return vsub_vv_u32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -764,7 +688,6 @@ vuint32m2_t test_vsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vsub_vx_u32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -774,7 +697,6 @@ vuint32m4_t test_vsub_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vsub_vv_u32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -784,7 +706,6 @@ vuint32m4_t test_vsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vsub_vx_u32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -794,7 +715,6 @@ vuint32m8_t test_vsub_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
   return vsub_vv_u32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -804,7 +724,6 @@ vuint32m8_t test_vsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
   return vsub_vx_u32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -814,7 +733,6 @@ vuint64m1_t test_vsub_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
   return vsub_vv_u64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -824,7 +742,6 @@ vuint64m1_t test_vsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
   return vsub_vx_u64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -834,7 +751,6 @@ vuint64m2_t test_vsub_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
   return vsub_vv_u64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -844,7 +760,6 @@ vuint64m2_t test_vsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
   return vsub_vx_u64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -854,7 +769,6 @@ vuint64m4_t test_vsub_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
   return vsub_vv_u64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -864,7 +778,6 @@ vuint64m4_t test_vsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
   return vsub_vx_u64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -874,7 +787,6 @@ vuint64m8_t test_vsub_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
   return vsub_vv_u64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -884,7 +796,6 @@ vuint64m8_t test_vsub_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
   return vsub_vx_u64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -894,7 +805,6 @@ vint8mf8_t test_vsub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t
   return vsub_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -904,7 +814,6 @@ vint8mf8_t test_vsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t
   return vsub_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -914,7 +823,6 @@ vint8mf4_t test_vsub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t
   return vsub_vv_i8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -924,7 +832,6 @@ vint8mf4_t test_vsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t
   return vsub_vx_i8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -934,7 +841,6 @@ vint8mf2_t test_vsub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t
   return vsub_vv_i8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -944,7 +850,6 @@ vint8mf2_t test_vsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t
   return vsub_vx_i8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -954,7 +859,6 @@ vint8m1_t test_vsub_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1,
   return vsub_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -964,7 +868,6 @@ vint8m1_t test_vsub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1,
   return vsub_vx_i8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -974,7 +877,6 @@ vint8m2_t test_vsub_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1,
   return vsub_vv_i8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -984,7 +886,6 @@ vint8m2_t test_vsub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1,
   return vsub_vx_i8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -994,7 +895,6 @@ vint8m4_t test_vsub_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1,
   return vsub_vv_i8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1004,7 +904,6 @@ vint8m4_t test_vsub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1,
   return vsub_vx_i8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1014,7 +913,6 @@ vint8m8_t test_vsub_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1,
   return vsub_vv_i8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1024,7 +922,6 @@ vint8m8_t test_vsub_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1,
   return vsub_vx_i8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1034,7 +931,6 @@ vint16mf4_t test_vsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16m
   return vsub_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1044,7 +940,6 @@ vint16mf4_t test_vsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16m
   return vsub_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1054,7 +949,6 @@ vint16mf2_t test_vsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16m
   return vsub_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1064,7 +958,6 @@ vint16mf2_t test_vsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16m
   return vsub_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1074,7 +967,6 @@ vint16m1_t test_vsub_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t
   return vsub_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1084,7 +976,6 @@ vint16m1_t test_vsub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t
   return vsub_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1094,7 +985,6 @@ vint16m2_t test_vsub_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
   return vsub_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1104,7 +994,6 @@ vint16m2_t test_vsub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
   return vsub_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1114,7 +1003,6 @@ vint16m4_t test_vsub_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
   return vsub_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1124,7 +1012,6 @@ vint16m4_t test_vsub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
   return vsub_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1134,7 +1021,6 @@ vint16m8_t test_vsub_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
   return vsub_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1144,7 +1030,6 @@ vint16m8_t test_vsub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
   return vsub_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1154,7 +1039,6 @@ vint32mf2_t test_vsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32m
   return vsub_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1164,7 +1048,6 @@ vint32mf2_t test_vsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32m
   return vsub_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1174,7 +1057,6 @@ vint32m1_t test_vsub_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t
   return vsub_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1184,7 +1066,6 @@ vint32m1_t test_vsub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t
   return vsub_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1194,7 +1075,6 @@ vint32m2_t test_vsub_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t
   return vsub_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1204,7 +1084,6 @@ vint32m2_t test_vsub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t
   return vsub_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1214,7 +1093,6 @@ vint32m4_t test_vsub_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
   return vsub_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1224,7 +1102,6 @@ vint32m4_t test_vsub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
   return vsub_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1234,7 +1111,6 @@ vint32m8_t test_vsub_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
   return vsub_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1244,7 +1120,6 @@ vint32m8_t test_vsub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
   return vsub_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1254,7 +1129,6 @@ vint64m1_t test_vsub_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t
   return vsub_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1264,7 +1138,6 @@ vint64m1_t test_vsub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t
   return vsub_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1274,7 +1147,6 @@ vint64m2_t test_vsub_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t
   return vsub_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1284,7 +1156,6 @@ vint64m2_t test_vsub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t
   return vsub_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1294,7 +1165,6 @@ vint64m4_t test_vsub_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t
   return vsub_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1304,7 +1174,6 @@ vint64m4_t test_vsub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t
   return vsub_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1314,7 +1183,6 @@ vint64m8_t test_vsub_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
   return vsub_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1324,7 +1192,6 @@ vint64m8_t test_vsub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
   return vsub_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1334,7 +1201,6 @@ vuint8mf8_t test_vsub_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf
   return vsub_vv_u8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1344,7 +1210,6 @@ vuint8mf8_t test_vsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf
   return vsub_vx_u8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1354,7 +1219,6 @@ vuint8mf4_t test_vsub_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf
   return vsub_vv_u8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1364,7 +1228,6 @@ vuint8mf4_t test_vsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf
   return vsub_vx_u8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1374,7 +1237,6 @@ vuint8mf2_t test_vsub_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf
   return vsub_vv_u8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1384,7 +1246,6 @@ vuint8mf2_t test_vsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf
   return vsub_vx_u8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1394,7 +1255,6 @@ vuint8m1_t test_vsub_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t o
   return vsub_vv_u8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1404,7 +1264,6 @@ vuint8m1_t test_vsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t o
   return vsub_vx_u8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1414,7 +1273,6 @@ vuint8m2_t test_vsub_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t o
   return vsub_vv_u8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1424,7 +1282,6 @@ vuint8m2_t test_vsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t o
   return vsub_vx_u8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1434,7 +1291,6 @@ vuint8m4_t test_vsub_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t o
   return vsub_vv_u8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1444,7 +1300,6 @@ vuint8m4_t test_vsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t o
   return vsub_vx_u8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1454,7 +1309,6 @@ vuint8m8_t test_vsub_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t o
   return vsub_vv_u8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1464,7 +1318,6 @@ vuint8m8_t test_vsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t o
   return vsub_vx_u8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1474,7 +1327,6 @@ vuint16mf4_t test_vsub_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint
   return vsub_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1484,7 +1336,6 @@ vuint16mf4_t test_vsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint
   return vsub_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1494,7 +1345,6 @@ vuint16mf2_t test_vsub_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint
   return vsub_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1504,7 +1354,6 @@ vuint16mf2_t test_vsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint
   return vsub_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1514,7 +1363,6 @@ vuint16m1_t test_vsub_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m
   return vsub_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1524,7 +1372,6 @@ vuint16m1_t test_vsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m
   return vsub_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1534,7 +1381,6 @@ vuint16m2_t test_vsub_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2
   return vsub_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1544,7 +1390,6 @@ vuint16m2_t test_vsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2
   return vsub_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1554,7 +1399,6 @@ vuint16m4_t test_vsub_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4
   return vsub_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1564,7 +1408,6 @@ vuint16m4_t test_vsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4
   return vsub_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1574,7 +1417,6 @@ vuint16m8_t test_vsub_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8
   return vsub_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1584,7 +1426,6 @@ vuint16m8_t test_vsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8
   return vsub_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1594,7 +1435,6 @@ vuint32mf2_t test_vsub_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint
   return vsub_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1604,7 +1444,6 @@ vuint32mf2_t test_vsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint
   return vsub_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1614,7 +1453,6 @@ vuint32m1_t test_vsub_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m
   return vsub_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1624,7 +1462,6 @@ vuint32m1_t test_vsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m
   return vsub_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1634,7 +1471,6 @@ vuint32m2_t test_vsub_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m
   return vsub_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1644,7 +1480,6 @@ vuint32m2_t test_vsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m
   return vsub_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1654,7 +1489,6 @@ vuint32m4_t test_vsub_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4
   return vsub_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1664,7 +1498,6 @@ vuint32m4_t test_vsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4
   return vsub_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1674,7 +1507,6 @@ vuint32m8_t test_vsub_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8
   return vsub_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1684,7 +1516,6 @@ vuint32m8_t test_vsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8
   return vsub_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1694,7 +1525,6 @@ vuint64m1_t test_vsub_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m
   return vsub_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1704,7 +1534,6 @@ vuint64m1_t test_vsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m
   return vsub_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1714,7 +1543,6 @@ vuint64m2_t test_vsub_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m
   return vsub_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1724,7 +1552,6 @@ vuint64m2_t test_vsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m
   return vsub_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1734,7 +1561,6 @@ vuint64m4_t test_vsub_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m
   return vsub_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1744,7 +1570,6 @@ vuint64m4_t test_vsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m
   return vsub_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1754,7 +1579,6 @@ vuint64m8_t test_vsub_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8
   return vsub_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsuxei.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsuxei.c
index 1482b0e9ffa7b..b954ec6f3aab6 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsuxei.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsuxei.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -17,7 +16,6 @@ void test_vsuxei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t value,
   return vsuxei8_v_i8mf8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -29,7 +27,6 @@ void test_vsuxei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t value,
   return vsuxei8_v_i8mf4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -41,7 +38,6 @@ void test_vsuxei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t value,
   return vsuxei8_v_i8mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -53,7 +49,6 @@ void test_vsuxei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t value,
   return vsuxei8_v_i8m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -65,7 +60,6 @@ void test_vsuxei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t value,
   return vsuxei8_v_i8m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
@@ -77,7 +71,6 @@ void test_vsuxei8_v_i8m4(int8_t *base, vuint8m4_t bindex, vint8m4_t value,
   return vsuxei8_v_i8m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
@@ -89,7 +82,6 @@ void test_vsuxei8_v_i8m8(int8_t *base, vuint8m8_t bindex, vint8m8_t value,
   return vsuxei8_v_i8m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -101,7 +93,6 @@ void test_vsuxei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t value,
   return vsuxei16_v_i8mf8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -113,7 +104,6 @@ void test_vsuxei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t value,
   return vsuxei16_v_i8mf4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -125,7 +115,6 @@ void test_vsuxei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t value,
   return vsuxei16_v_i8mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -137,7 +126,6 @@ void test_vsuxei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t value,
   return vsuxei16_v_i8m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -149,7 +137,6 @@ void test_vsuxei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t value,
   return vsuxei16_v_i8m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
@@ -161,7 +148,6 @@ void test_vsuxei16_v_i8m4(int8_t *base, vuint16m8_t bindex, vint8m4_t value,
   return vsuxei16_v_i8m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -173,7 +159,6 @@ void test_vsuxei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t value,
   return vsuxei32_v_i8mf8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -185,7 +170,6 @@ void test_vsuxei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t value,
   return vsuxei32_v_i8mf4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -197,7 +181,6 @@ void test_vsuxei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t value,
   return vsuxei32_v_i8mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -209,7 +192,6 @@ void test_vsuxei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t value,
   return vsuxei32_v_i8m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -221,7 +203,6 @@ void test_vsuxei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t value,
   return vsuxei32_v_i8m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -233,7 +214,6 @@ void test_vsuxei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t value,
   return vsuxei64_v_i8mf8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -245,7 +225,6 @@ void test_vsuxei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t value,
   return vsuxei64_v_i8mf4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -257,7 +236,6 @@ void test_vsuxei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t value,
   return vsuxei64_v_i8mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -269,7 +247,6 @@ void test_vsuxei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t value,
   return vsuxei64_v_i8m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -281,7 +258,6 @@ void test_vsuxei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t value,
   return vsuxei8_v_i16mf4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -293,7 +269,6 @@ void test_vsuxei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t value,
   return vsuxei8_v_i16mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -305,7 +280,6 @@ void test_vsuxei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t value,
   return vsuxei8_v_i16m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -317,7 +291,6 @@ void test_vsuxei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t value,
   return vsuxei8_v_i16m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -329,7 +302,6 @@ void test_vsuxei8_v_i16m4(int16_t *base, vuint8m2_t bindex, vint16m4_t value,
   return vsuxei8_v_i16m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
@@ -341,7 +313,6 @@ void test_vsuxei8_v_i16m8(int16_t *base, vuint8m4_t bindex, vint16m8_t value,
   return vsuxei8_v_i16m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -353,7 +324,6 @@ void test_vsuxei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex,
   return vsuxei16_v_i16mf4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -365,7 +335,6 @@ void test_vsuxei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex,
   return vsuxei16_v_i16mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -377,7 +346,6 @@ void test_vsuxei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t value,
   return vsuxei16_v_i16m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -389,7 +357,6 @@ void test_vsuxei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t value,
   return vsuxei16_v_i16m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -401,7 +368,6 @@ void test_vsuxei16_v_i16m4(int16_t *base, vuint16m4_t bindex, vint16m4_t value,
   return vsuxei16_v_i16m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
@@ -413,7 +379,6 @@ void test_vsuxei16_v_i16m8(int16_t *base, vuint16m8_t bindex, vint16m8_t value,
   return vsuxei16_v_i16m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -425,7 +390,6 @@ void test_vsuxei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex,
   return vsuxei32_v_i16mf4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -437,7 +401,6 @@ void test_vsuxei32_v_i16mf2(int16_t *base, vuint32m1_t bindex,
   return vsuxei32_v_i16mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -449,7 +412,6 @@ void test_vsuxei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t value,
   return vsuxei32_v_i16m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -461,7 +423,6 @@ void test_vsuxei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t value,
   return vsuxei32_v_i16m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -473,7 +434,6 @@ void test_vsuxei32_v_i16m4(int16_t *base, vuint32m8_t bindex, vint16m4_t value,
   return vsuxei32_v_i16m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -485,7 +445,6 @@ void test_vsuxei64_v_i16mf4(int16_t *base, vuint64m1_t bindex,
   return vsuxei64_v_i16mf4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -497,7 +456,6 @@ void test_vsuxei64_v_i16mf2(int16_t *base, vuint64m2_t bindex,
   return vsuxei64_v_i16mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -509,7 +467,6 @@ void test_vsuxei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t value,
   return vsuxei64_v_i16m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -521,7 +478,6 @@ void test_vsuxei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t value,
   return vsuxei64_v_i16m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -533,7 +489,6 @@ void test_vsuxei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t value,
   return vsuxei8_v_i32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -545,7 +500,6 @@ void test_vsuxei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t value,
   return vsuxei8_v_i32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -557,7 +511,6 @@ void test_vsuxei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t value,
   return vsuxei8_v_i32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -569,7 +522,6 @@ void test_vsuxei8_v_i32m4(int32_t *base, vuint8m1_t bindex, vint32m4_t value,
   return vsuxei8_v_i32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -581,7 +533,6 @@ void test_vsuxei8_v_i32m8(int32_t *base, vuint8m2_t bindex, vint32m8_t value,
   return vsuxei8_v_i32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -593,7 +544,6 @@ void test_vsuxei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex,
   return vsuxei16_v_i32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -605,7 +555,6 @@ void test_vsuxei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t value,
   return vsuxei16_v_i32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -617,7 +566,6 @@ void test_vsuxei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t value,
   return vsuxei16_v_i32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -629,7 +577,6 @@ void test_vsuxei16_v_i32m4(int32_t *base, vuint16m2_t bindex, vint32m4_t value,
   return vsuxei16_v_i32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -641,7 +588,6 @@ void test_vsuxei16_v_i32m8(int32_t *base, vuint16m4_t bindex, vint32m8_t value,
   return vsuxei16_v_i32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -653,7 +599,6 @@ void test_vsuxei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex,
   return vsuxei32_v_i32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -665,7 +610,6 @@ void test_vsuxei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t value,
   return vsuxei32_v_i32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -677,7 +621,6 @@ void test_vsuxei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t value,
   return vsuxei32_v_i32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -689,7 +632,6 @@ void test_vsuxei32_v_i32m4(int32_t *base, vuint32m4_t bindex, vint32m4_t value,
   return vsuxei32_v_i32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -701,7 +643,6 @@ void test_vsuxei32_v_i32m8(int32_t *base, vuint32m8_t bindex, vint32m8_t value,
   return vsuxei32_v_i32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -713,7 +654,6 @@ void test_vsuxei64_v_i32mf2(int32_t *base, vuint64m1_t bindex,
   return vsuxei64_v_i32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -725,7 +665,6 @@ void test_vsuxei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t value,
   return vsuxei64_v_i32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -737,7 +676,6 @@ void test_vsuxei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t value,
   return vsuxei64_v_i32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -749,7 +687,6 @@ void test_vsuxei64_v_i32m4(int32_t *base, vuint64m8_t bindex, vint32m4_t value,
   return vsuxei64_v_i32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -761,7 +698,6 @@ void test_vsuxei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t value,
   return vsuxei8_v_i64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -773,7 +709,6 @@ void test_vsuxei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t value,
   return vsuxei8_v_i64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -785,7 +720,6 @@ void test_vsuxei8_v_i64m4(int64_t *base, vuint8mf2_t bindex, vint64m4_t value,
   return vsuxei8_v_i64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -797,7 +731,6 @@ void test_vsuxei8_v_i64m8(int64_t *base, vuint8m1_t bindex, vint64m8_t value,
   return vsuxei8_v_i64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -809,7 +742,6 @@ void test_vsuxei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t value,
   return vsuxei16_v_i64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -821,7 +753,6 @@ void test_vsuxei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t value,
   return vsuxei16_v_i64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -833,7 +764,6 @@ void test_vsuxei16_v_i64m4(int64_t *base, vuint16m1_t bindex, vint64m4_t value,
   return vsuxei16_v_i64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -845,7 +775,6 @@ void test_vsuxei16_v_i64m8(int64_t *base, vuint16m2_t bindex, vint64m8_t value,
   return vsuxei16_v_i64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -857,7 +786,6 @@ void test_vsuxei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t value,
   return vsuxei32_v_i64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -869,7 +797,6 @@ void test_vsuxei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t value,
   return vsuxei32_v_i64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -881,7 +808,6 @@ void test_vsuxei32_v_i64m4(int64_t *base, vuint32m2_t bindex, vint64m4_t value,
   return vsuxei32_v_i64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -893,7 +819,6 @@ void test_vsuxei32_v_i64m8(int64_t *base, vuint32m4_t bindex, vint64m8_t value,
   return vsuxei32_v_i64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -905,7 +830,6 @@ void test_vsuxei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t value,
   return vsuxei64_v_i64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -917,7 +841,6 @@ void test_vsuxei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t value,
   return vsuxei64_v_i64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -929,7 +852,6 @@ void test_vsuxei64_v_i64m4(int64_t *base, vuint64m4_t bindex, vint64m4_t value,
   return vsuxei64_v_i64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -941,7 +863,6 @@ void test_vsuxei64_v_i64m8(int64_t *base, vuint64m8_t bindex, vint64m8_t value,
   return vsuxei64_v_i64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -953,7 +874,6 @@ void test_vsuxei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t value,
   return vsuxei8_v_u8mf8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -965,7 +885,6 @@ void test_vsuxei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t value,
   return vsuxei8_v_u8mf4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -977,7 +896,6 @@ void test_vsuxei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t value,
   return vsuxei8_v_u8mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -989,7 +907,6 @@ void test_vsuxei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t value,
   return vsuxei8_v_u8m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -1001,7 +918,6 @@ void test_vsuxei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t value,
   return vsuxei8_v_u8m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
@@ -1013,7 +929,6 @@ void test_vsuxei8_v_u8m4(uint8_t *base, vuint8m4_t bindex, vuint8m4_t value,
   return vsuxei8_v_u8m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
@@ -1025,7 +940,6 @@ void test_vsuxei8_v_u8m8(uint8_t *base, vuint8m8_t bindex, vuint8m8_t value,
   return vsuxei8_v_u8m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -1037,7 +951,6 @@ void test_vsuxei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex,
   return vsuxei16_v_u8mf8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -1049,7 +962,6 @@ void test_vsuxei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex,
   return vsuxei16_v_u8mf4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -1061,7 +973,6 @@ void test_vsuxei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t value,
   return vsuxei16_v_u8mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -1073,7 +984,6 @@ void test_vsuxei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t value,
   return vsuxei16_v_u8m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -1085,7 +995,6 @@ void test_vsuxei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t value,
   return vsuxei16_v_u8m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
@@ -1097,7 +1006,6 @@ void test_vsuxei16_v_u8m4(uint8_t *base, vuint16m8_t bindex, vuint8m4_t value,
   return vsuxei16_v_u8m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -1109,7 +1017,6 @@ void test_vsuxei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex,
   return vsuxei32_v_u8mf8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -1121,7 +1028,6 @@ void test_vsuxei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t value,
   return vsuxei32_v_u8mf4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -1133,7 +1039,6 @@ void test_vsuxei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t value,
   return vsuxei32_v_u8mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -1145,7 +1050,6 @@ void test_vsuxei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t value,
   return vsuxei32_v_u8m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -1157,7 +1061,6 @@ void test_vsuxei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t value,
   return vsuxei32_v_u8m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -1169,7 +1072,6 @@ void test_vsuxei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t value,
   return vsuxei64_v_u8mf8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -1181,7 +1083,6 @@ void test_vsuxei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t value,
   return vsuxei64_v_u8mf4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -1193,7 +1094,6 @@ void test_vsuxei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t value,
   return vsuxei64_v_u8mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -1205,7 +1105,6 @@ void test_vsuxei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t value,
   return vsuxei64_v_u8m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -1217,7 +1116,6 @@ void test_vsuxei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex,
   return vsuxei8_v_u16mf4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -1229,7 +1127,6 @@ void test_vsuxei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex,
   return vsuxei8_v_u16mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -1241,7 +1138,6 @@ void test_vsuxei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t value,
   return vsuxei8_v_u16m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -1253,7 +1149,6 @@ void test_vsuxei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t value,
   return vsuxei8_v_u16m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -1265,7 +1160,6 @@ void test_vsuxei8_v_u16m4(uint16_t *base, vuint8m2_t bindex, vuint16m4_t value,
   return vsuxei8_v_u16m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
@@ -1277,7 +1171,6 @@ void test_vsuxei8_v_u16m8(uint16_t *base, vuint8m4_t bindex, vuint16m8_t value,
   return vsuxei8_v_u16m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -1289,7 +1182,6 @@ void test_vsuxei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex,
   return vsuxei16_v_u16mf4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -1301,7 +1193,6 @@ void test_vsuxei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex,
   return vsuxei16_v_u16mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -1313,7 +1204,6 @@ void test_vsuxei16_v_u16m1(uint16_t *base, vuint16m1_t bindex,
   return vsuxei16_v_u16m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -1325,7 +1215,6 @@ void test_vsuxei16_v_u16m2(uint16_t *base, vuint16m2_t bindex,
   return vsuxei16_v_u16m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -1337,7 +1226,6 @@ void test_vsuxei16_v_u16m4(uint16_t *base, vuint16m4_t bindex,
   return vsuxei16_v_u16m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
@@ -1349,7 +1237,6 @@ void test_vsuxei16_v_u16m8(uint16_t *base, vuint16m8_t bindex,
   return vsuxei16_v_u16m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -1361,7 +1248,6 @@ void test_vsuxei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex,
   return vsuxei32_v_u16mf4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -1373,7 +1259,6 @@ void test_vsuxei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex,
   return vsuxei32_v_u16mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -1385,7 +1270,6 @@ void test_vsuxei32_v_u16m1(uint16_t *base, vuint32m2_t bindex,
   return vsuxei32_v_u16m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -1397,7 +1281,6 @@ void test_vsuxei32_v_u16m2(uint16_t *base, vuint32m4_t bindex,
   return vsuxei32_v_u16m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -1409,7 +1292,6 @@ void test_vsuxei32_v_u16m4(uint16_t *base, vuint32m8_t bindex,
   return vsuxei32_v_u16m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -1421,7 +1303,6 @@ void test_vsuxei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex,
   return vsuxei64_v_u16mf4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -1433,7 +1314,6 @@ void test_vsuxei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex,
   return vsuxei64_v_u16mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -1445,7 +1325,6 @@ void test_vsuxei64_v_u16m1(uint16_t *base, vuint64m4_t bindex,
   return vsuxei64_v_u16m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -1457,7 +1336,6 @@ void test_vsuxei64_v_u16m2(uint16_t *base, vuint64m8_t bindex,
   return vsuxei64_v_u16m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1469,7 +1347,6 @@ void test_vsuxei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex,
   return vsuxei8_v_u32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1481,7 +1358,6 @@ void test_vsuxei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value,
   return vsuxei8_v_u32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1493,7 +1369,6 @@ void test_vsuxei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value,
   return vsuxei8_v_u32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1505,7 +1380,6 @@ void test_vsuxei8_v_u32m4(uint32_t *base, vuint8m1_t bindex, vuint32m4_t value,
   return vsuxei8_v_u32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1517,7 +1391,6 @@ void test_vsuxei8_v_u32m8(uint32_t *base, vuint8m2_t bindex, vuint32m8_t value,
   return vsuxei8_v_u32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1529,7 +1402,6 @@ void test_vsuxei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex,
   return vsuxei16_v_u32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1541,7 +1413,6 @@ void test_vsuxei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex,
   return vsuxei16_v_u32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1553,7 +1424,6 @@ void test_vsuxei16_v_u32m2(uint32_t *base, vuint16m1_t bindex,
   return vsuxei16_v_u32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1565,7 +1435,6 @@ void test_vsuxei16_v_u32m4(uint32_t *base, vuint16m2_t bindex,
   return vsuxei16_v_u32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1577,7 +1446,6 @@ void test_vsuxei16_v_u32m8(uint32_t *base, vuint16m4_t bindex,
   return vsuxei16_v_u32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1589,7 +1457,6 @@ void test_vsuxei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex,
   return vsuxei32_v_u32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1601,7 +1468,6 @@ void test_vsuxei32_v_u32m1(uint32_t *base, vuint32m1_t bindex,
   return vsuxei32_v_u32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1613,7 +1479,6 @@ void test_vsuxei32_v_u32m2(uint32_t *base, vuint32m2_t bindex,
   return vsuxei32_v_u32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1625,7 +1490,6 @@ void test_vsuxei32_v_u32m4(uint32_t *base, vuint32m4_t bindex,
   return vsuxei32_v_u32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1637,7 +1501,6 @@ void test_vsuxei32_v_u32m8(uint32_t *base, vuint32m8_t bindex,
   return vsuxei32_v_u32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1649,7 +1512,6 @@ void test_vsuxei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex,
   return vsuxei64_v_u32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1661,7 +1523,6 @@ void test_vsuxei64_v_u32m1(uint32_t *base, vuint64m2_t bindex,
   return vsuxei64_v_u32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1673,7 +1534,6 @@ void test_vsuxei64_v_u32m2(uint32_t *base, vuint64m4_t bindex,
   return vsuxei64_v_u32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1685,7 +1545,6 @@ void test_vsuxei64_v_u32m4(uint32_t *base, vuint64m8_t bindex,
   return vsuxei64_v_u32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1697,7 +1556,6 @@ void test_vsuxei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value,
   return vsuxei8_v_u64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1709,7 +1567,6 @@ void test_vsuxei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value,
   return vsuxei8_v_u64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1721,7 +1578,6 @@ void test_vsuxei8_v_u64m4(uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value,
   return vsuxei8_v_u64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1733,7 +1589,6 @@ void test_vsuxei8_v_u64m8(uint64_t *base, vuint8m1_t bindex, vuint64m8_t value,
   return vsuxei8_v_u64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1745,7 +1600,6 @@ void test_vsuxei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex,
   return vsuxei16_v_u64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1757,7 +1611,6 @@ void test_vsuxei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex,
   return vsuxei16_v_u64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1769,7 +1622,6 @@ void test_vsuxei16_v_u64m4(uint64_t *base, vuint16m1_t bindex,
   return vsuxei16_v_u64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1781,7 +1633,6 @@ void test_vsuxei16_v_u64m8(uint64_t *base, vuint16m2_t bindex,
   return vsuxei16_v_u64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1793,7 +1644,6 @@ void test_vsuxei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex,
   return vsuxei32_v_u64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1805,7 +1655,6 @@ void test_vsuxei32_v_u64m2(uint64_t *base, vuint32m1_t bindex,
   return vsuxei32_v_u64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1817,7 +1666,6 @@ void test_vsuxei32_v_u64m4(uint64_t *base, vuint32m2_t bindex,
   return vsuxei32_v_u64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1829,7 +1677,6 @@ void test_vsuxei32_v_u64m8(uint64_t *base, vuint32m4_t bindex,
   return vsuxei32_v_u64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1841,7 +1688,6 @@ void test_vsuxei64_v_u64m1(uint64_t *base, vuint64m1_t bindex,
   return vsuxei64_v_u64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1853,7 +1699,6 @@ void test_vsuxei64_v_u64m2(uint64_t *base, vuint64m2_t bindex,
   return vsuxei64_v_u64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1865,7 +1710,6 @@ void test_vsuxei64_v_u64m4(uint64_t *base, vuint64m4_t bindex,
   return vsuxei64_v_u64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1877,7 +1721,6 @@ void test_vsuxei64_v_u64m8(uint64_t *base, vuint64m8_t bindex,
   return vsuxei64_v_u64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -1889,7 +1732,6 @@ void test_vsuxei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t value,
   return vsuxei8_v_f32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -1901,7 +1743,6 @@ void test_vsuxei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t value,
   return vsuxei8_v_f32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -1913,7 +1754,6 @@ void test_vsuxei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t value,
   return vsuxei8_v_f32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -1925,7 +1765,6 @@ void test_vsuxei8_v_f32m4(float *base, vuint8m1_t bindex, vfloat32m4_t value,
   return vsuxei8_v_f32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
@@ -1937,7 +1776,6 @@ void test_vsuxei8_v_f32m8(float *base, vuint8m2_t bindex, vfloat32m8_t value,
   return vsuxei8_v_f32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -1949,7 +1787,6 @@ void test_vsuxei16_v_f32mf2(float *base, vuint16mf4_t bindex,
   return vsuxei16_v_f32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -1961,7 +1798,6 @@ void test_vsuxei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t value,
   return vsuxei16_v_f32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -1973,7 +1809,6 @@ void test_vsuxei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t value,
   return vsuxei16_v_f32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -1985,7 +1820,6 @@ void test_vsuxei16_v_f32m4(float *base, vuint16m2_t bindex, vfloat32m4_t value,
   return vsuxei16_v_f32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
@@ -1997,7 +1831,6 @@ void test_vsuxei16_v_f32m8(float *base, vuint16m4_t bindex, vfloat32m8_t value,
   return vsuxei16_v_f32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -2009,7 +1842,6 @@ void test_vsuxei32_v_f32mf2(float *base, vuint32mf2_t bindex,
   return vsuxei32_v_f32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -2021,7 +1853,6 @@ void test_vsuxei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t value,
   return vsuxei32_v_f32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -2033,7 +1864,6 @@ void test_vsuxei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t value,
   return vsuxei32_v_f32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -2045,7 +1875,6 @@ void test_vsuxei32_v_f32m4(float *base, vuint32m4_t bindex, vfloat32m4_t value,
   return vsuxei32_v_f32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
@@ -2057,7 +1886,6 @@ void test_vsuxei32_v_f32m8(float *base, vuint32m8_t bindex, vfloat32m8_t value,
   return vsuxei32_v_f32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -2069,7 +1897,6 @@ void test_vsuxei64_v_f32mf2(float *base, vuint64m1_t bindex,
   return vsuxei64_v_f32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -2081,7 +1908,6 @@ void test_vsuxei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t value,
   return vsuxei64_v_f32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -2093,7 +1919,6 @@ void test_vsuxei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t value,
   return vsuxei64_v_f32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -2105,7 +1930,6 @@ void test_vsuxei64_v_f32m4(float *base, vuint64m8_t bindex, vfloat32m4_t value,
   return vsuxei64_v_f32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -2117,7 +1941,6 @@ void test_vsuxei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t value,
   return vsuxei8_v_f64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -2129,7 +1952,6 @@ void test_vsuxei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t value,
   return vsuxei8_v_f64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -2141,7 +1963,6 @@ void test_vsuxei8_v_f64m4(double *base, vuint8mf2_t bindex, vfloat64m4_t value,
   return vsuxei8_v_f64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
@@ -2153,7 +1974,6 @@ void test_vsuxei8_v_f64m8(double *base, vuint8m1_t bindex, vfloat64m8_t value,
   return vsuxei8_v_f64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -2165,7 +1985,6 @@ void test_vsuxei16_v_f64m1(double *base, vuint16mf4_t bindex,
   return vsuxei16_v_f64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -2177,7 +1996,6 @@ void test_vsuxei16_v_f64m2(double *base, vuint16mf2_t bindex,
   return vsuxei16_v_f64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -2189,7 +2007,6 @@ void test_vsuxei16_v_f64m4(double *base, vuint16m1_t bindex, vfloat64m4_t value,
   return vsuxei16_v_f64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
@@ -2201,7 +2018,6 @@ void test_vsuxei16_v_f64m8(double *base, vuint16m2_t bindex, vfloat64m8_t value,
   return vsuxei16_v_f64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -2213,7 +2029,6 @@ void test_vsuxei32_v_f64m1(double *base, vuint32mf2_t bindex,
   return vsuxei32_v_f64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -2225,7 +2040,6 @@ void test_vsuxei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t value,
   return vsuxei32_v_f64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -2237,7 +2051,6 @@ void test_vsuxei32_v_f64m4(double *base, vuint32m2_t bindex, vfloat64m4_t value,
   return vsuxei32_v_f64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
@@ -2249,7 +2062,6 @@ void test_vsuxei32_v_f64m8(double *base, vuint32m4_t bindex, vfloat64m8_t value,
   return vsuxei32_v_f64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -2261,7 +2073,6 @@ void test_vsuxei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t value,
   return vsuxei64_v_f64m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -2273,7 +2084,6 @@ void test_vsuxei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t value,
   return vsuxei64_v_f64m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -2285,7 +2095,6 @@ void test_vsuxei64_v_f64m4(double *base, vuint64m4_t bindex, vfloat64m4_t value,
   return vsuxei64_v_f64m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
@@ -2297,7 +2106,6 @@ void test_vsuxei64_v_f64m8(double *base, vuint64m8_t bindex, vfloat64m8_t value,
   return vsuxei64_v_f64m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -2309,7 +2117,6 @@ void test_vsuxei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex,
   return vsuxei8_v_i8mf8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -2321,7 +2128,6 @@ void test_vsuxei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex,
   return vsuxei8_v_i8mf4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -2333,7 +2139,6 @@ void test_vsuxei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex,
   return vsuxei8_v_i8mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -2345,7 +2150,6 @@ void test_vsuxei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex,
   return vsuxei8_v_i8m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -2357,7 +2161,6 @@ void test_vsuxei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex,
   return vsuxei8_v_i8m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
@@ -2369,7 +2172,6 @@ void test_vsuxei8_v_i8m4_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex,
   return vsuxei8_v_i8m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
@@ -2381,7 +2183,6 @@ void test_vsuxei8_v_i8m8_m(vbool1_t mask, int8_t *base, vuint8m8_t bindex,
   return vsuxei8_v_i8m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -2393,7 +2194,6 @@ void test_vsuxei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex,
   return vsuxei16_v_i8mf8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -2405,7 +2205,6 @@ void test_vsuxei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex,
   return vsuxei16_v_i8mf4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -2417,7 +2216,6 @@ void test_vsuxei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex,
   return vsuxei16_v_i8mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -2429,7 +2227,6 @@ void test_vsuxei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex,
   return vsuxei16_v_i8m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -2441,7 +2238,6 @@ void test_vsuxei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex,
   return vsuxei16_v_i8m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
@@ -2453,7 +2249,6 @@ void test_vsuxei16_v_i8m4_m(vbool2_t mask, int8_t *base, vuint16m8_t bindex,
   return vsuxei16_v_i8m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -2465,7 +2260,6 @@ void test_vsuxei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex,
   return vsuxei32_v_i8mf8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -2477,7 +2271,6 @@ void test_vsuxei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex,
   return vsuxei32_v_i8mf4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -2489,7 +2282,6 @@ void test_vsuxei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex,
   return vsuxei32_v_i8mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -2501,7 +2293,6 @@ void test_vsuxei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex,
   return vsuxei32_v_i8m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -2513,7 +2304,6 @@ void test_vsuxei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex,
   return vsuxei32_v_i8m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -2525,7 +2315,6 @@ void test_vsuxei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex,
   return vsuxei64_v_i8mf8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -2537,7 +2326,6 @@ void test_vsuxei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex,
   return vsuxei64_v_i8mf4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -2549,7 +2337,6 @@ void test_vsuxei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex,
   return vsuxei64_v_i8mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -2561,7 +2348,6 @@ void test_vsuxei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex,
   return vsuxei64_v_i8m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -2573,7 +2359,6 @@ void test_vsuxei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex,
   return vsuxei8_v_i16mf4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -2585,7 +2370,6 @@ void test_vsuxei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex,
   return vsuxei8_v_i16mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -2597,7 +2381,6 @@ void test_vsuxei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex,
   return vsuxei8_v_i16m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -2609,7 +2392,6 @@ void test_vsuxei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex,
   return vsuxei8_v_i16m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -2621,7 +2403,6 @@ void test_vsuxei8_v_i16m4_m(vbool4_t mask, int16_t *base, vuint8m2_t bindex,
   return vsuxei8_v_i16m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
@@ -2633,7 +2414,6 @@ void test_vsuxei8_v_i16m8_m(vbool2_t mask, int16_t *base, vuint8m4_t bindex,
   return vsuxei8_v_i16m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -2646,7 +2426,6 @@ void test_vsuxei16_v_i16mf4_m(vbool64_t mask, int16_t *base,
   return vsuxei16_v_i16mf4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -2659,7 +2438,6 @@ void test_vsuxei16_v_i16mf2_m(vbool32_t mask, int16_t *base,
   return vsuxei16_v_i16mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -2671,7 +2449,6 @@ void test_vsuxei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex,
   return vsuxei16_v_i16m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -2683,7 +2460,6 @@ void test_vsuxei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex,
   return vsuxei16_v_i16m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -2695,7 +2471,6 @@ void test_vsuxei16_v_i16m4_m(vbool4_t mask, int16_t *base, vuint16m4_t bindex,
   return vsuxei16_v_i16m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
@@ -2707,7 +2482,6 @@ void test_vsuxei16_v_i16m8_m(vbool2_t mask, int16_t *base, vuint16m8_t bindex,
   return vsuxei16_v_i16m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -2720,7 +2494,6 @@ void test_vsuxei32_v_i16mf4_m(vbool64_t mask, int16_t *base,
   return vsuxei32_v_i16mf4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -2732,7 +2505,6 @@ void test_vsuxei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex,
   return vsuxei32_v_i16mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -2744,7 +2516,6 @@ void test_vsuxei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex,
   return vsuxei32_v_i16m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -2756,7 +2527,6 @@ void test_vsuxei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex,
   return vsuxei32_v_i16m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -2768,7 +2538,6 @@ void test_vsuxei32_v_i16m4_m(vbool4_t mask, int16_t *base, vuint32m8_t bindex,
   return vsuxei32_v_i16m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -2780,7 +2549,6 @@ void test_vsuxei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex,
   return vsuxei64_v_i16mf4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -2792,7 +2560,6 @@ void test_vsuxei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex,
   return vsuxei64_v_i16mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -2804,7 +2571,6 @@ void test_vsuxei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex,
   return vsuxei64_v_i16m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -2816,7 +2582,6 @@ void test_vsuxei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex,
   return vsuxei64_v_i16m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -2828,7 +2593,6 @@ void test_vsuxei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex,
   return vsuxei8_v_i32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -2840,7 +2604,6 @@ void test_vsuxei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex,
   return vsuxei8_v_i32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -2852,7 +2615,6 @@ void test_vsuxei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex,
   return vsuxei8_v_i32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -2864,7 +2626,6 @@ void test_vsuxei8_v_i32m4_m(vbool8_t mask, int32_t *base, vuint8m1_t bindex,
   return vsuxei8_v_i32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -2876,7 +2637,6 @@ void test_vsuxei8_v_i32m8_m(vbool4_t mask, int32_t *base, vuint8m2_t bindex,
   return vsuxei8_v_i32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -2889,7 +2649,6 @@ void test_vsuxei16_v_i32mf2_m(vbool64_t mask, int32_t *base,
   return vsuxei16_v_i32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -2901,7 +2660,6 @@ void test_vsuxei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex,
   return vsuxei16_v_i32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -2913,7 +2671,6 @@ void test_vsuxei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex,
   return vsuxei16_v_i32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -2925,7 +2682,6 @@ void test_vsuxei16_v_i32m4_m(vbool8_t mask, int32_t *base, vuint16m2_t bindex,
   return vsuxei16_v_i32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -2937,7 +2693,6 @@ void test_vsuxei16_v_i32m8_m(vbool4_t mask, int32_t *base, vuint16m4_t bindex,
   return vsuxei16_v_i32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -2950,7 +2705,6 @@ void test_vsuxei32_v_i32mf2_m(vbool64_t mask, int32_t *base,
   return vsuxei32_v_i32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -2962,7 +2716,6 @@ void test_vsuxei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex,
   return vsuxei32_v_i32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -2974,7 +2727,6 @@ void test_vsuxei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex,
   return vsuxei32_v_i32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -2986,7 +2738,6 @@ void test_vsuxei32_v_i32m4_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex,
   return vsuxei32_v_i32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -2998,7 +2749,6 @@ void test_vsuxei32_v_i32m8_m(vbool4_t mask, int32_t *base, vuint32m8_t bindex,
   return vsuxei32_v_i32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -3010,7 +2760,6 @@ void test_vsuxei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex,
   return vsuxei64_v_i32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -3022,7 +2771,6 @@ void test_vsuxei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex,
   return vsuxei64_v_i32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -3034,7 +2782,6 @@ void test_vsuxei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex,
   return vsuxei64_v_i32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -3046,7 +2793,6 @@ void test_vsuxei64_v_i32m4_m(vbool8_t mask, int32_t *base, vuint64m8_t bindex,
   return vsuxei64_v_i32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -3058,7 +2804,6 @@ void test_vsuxei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex,
   return vsuxei8_v_i64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -3070,7 +2815,6 @@ void test_vsuxei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex,
   return vsuxei8_v_i64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -3082,7 +2826,6 @@ void test_vsuxei8_v_i64m4_m(vbool16_t mask, int64_t *base, vuint8mf2_t bindex,
   return vsuxei8_v_i64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -3094,7 +2837,6 @@ void test_vsuxei8_v_i64m8_m(vbool8_t mask, int64_t *base, vuint8m1_t bindex,
   return vsuxei8_v_i64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -3106,7 +2848,6 @@ void test_vsuxei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex,
   return vsuxei16_v_i64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -3118,7 +2859,6 @@ void test_vsuxei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex,
   return vsuxei16_v_i64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -3130,7 +2870,6 @@ void test_vsuxei16_v_i64m4_m(vbool16_t mask, int64_t *base, vuint16m1_t bindex,
   return vsuxei16_v_i64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -3142,7 +2881,6 @@ void test_vsuxei16_v_i64m8_m(vbool8_t mask, int64_t *base, vuint16m2_t bindex,
   return vsuxei16_v_i64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -3154,7 +2892,6 @@ void test_vsuxei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex,
   return vsuxei32_v_i64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -3166,7 +2903,6 @@ void test_vsuxei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex,
   return vsuxei32_v_i64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -3178,7 +2914,6 @@ void test_vsuxei32_v_i64m4_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex,
   return vsuxei32_v_i64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -3190,7 +2925,6 @@ void test_vsuxei32_v_i64m8_m(vbool8_t mask, int64_t *base, vuint32m4_t bindex,
   return vsuxei32_v_i64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -3202,7 +2936,6 @@ void test_vsuxei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex,
   return vsuxei64_v_i64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -3214,7 +2947,6 @@ void test_vsuxei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex,
   return vsuxei64_v_i64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -3226,7 +2958,6 @@ void test_vsuxei64_v_i64m4_m(vbool16_t mask, int64_t *base, vuint64m4_t bindex,
   return vsuxei64_v_i64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -3238,7 +2969,6 @@ void test_vsuxei64_v_i64m8_m(vbool8_t mask, int64_t *base, vuint64m8_t bindex,
   return vsuxei64_v_i64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -3250,7 +2980,6 @@ void test_vsuxei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex,
   return vsuxei8_v_u8mf8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -3262,7 +2991,6 @@ void test_vsuxei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex,
   return vsuxei8_v_u8mf4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -3274,7 +3002,6 @@ void test_vsuxei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex,
   return vsuxei8_v_u8mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -3286,7 +3013,6 @@ void test_vsuxei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex,
   return vsuxei8_v_u8m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -3298,7 +3024,6 @@ void test_vsuxei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex,
   return vsuxei8_v_u8m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
@@ -3310,7 +3035,6 @@ void test_vsuxei8_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex,
   return vsuxei8_v_u8m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
@@ -3322,7 +3046,6 @@ void test_vsuxei8_v_u8m8_m(vbool1_t mask, uint8_t *base, vuint8m8_t bindex,
   return vsuxei8_v_u8m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -3334,7 +3057,6 @@ void test_vsuxei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex,
   return vsuxei16_v_u8mf8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -3346,7 +3068,6 @@ void test_vsuxei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex,
   return vsuxei16_v_u8mf4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -3358,7 +3079,6 @@ void test_vsuxei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex,
   return vsuxei16_v_u8mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -3370,7 +3090,6 @@ void test_vsuxei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex,
   return vsuxei16_v_u8m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -3382,7 +3101,6 @@ void test_vsuxei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex,
   return vsuxei16_v_u8m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
@@ -3394,7 +3112,6 @@ void test_vsuxei16_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint16m8_t bindex,
   return vsuxei16_v_u8m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -3406,7 +3123,6 @@ void test_vsuxei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex,
   return vsuxei32_v_u8mf8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -3418,7 +3134,6 @@ void test_vsuxei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex,
   return vsuxei32_v_u8mf4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -3430,7 +3145,6 @@ void test_vsuxei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex,
   return vsuxei32_v_u8mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -3442,7 +3156,6 @@ void test_vsuxei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex,
   return vsuxei32_v_u8m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -3454,7 +3167,6 @@ void test_vsuxei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex,
   return vsuxei32_v_u8m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -3466,7 +3178,6 @@ void test_vsuxei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex,
   return vsuxei64_v_u8mf8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -3478,7 +3189,6 @@ void test_vsuxei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex,
   return vsuxei64_v_u8mf4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -3490,7 +3200,6 @@ void test_vsuxei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex,
   return vsuxei64_v_u8mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -3502,7 +3211,6 @@ void test_vsuxei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex,
   return vsuxei64_v_u8m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -3514,7 +3222,6 @@ void test_vsuxei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex,
   return vsuxei8_v_u16mf4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -3526,7 +3233,6 @@ void test_vsuxei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex,
   return vsuxei8_v_u16mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -3538,7 +3244,6 @@ void test_vsuxei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex,
   return vsuxei8_v_u16m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -3550,7 +3255,6 @@ void test_vsuxei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex,
   return vsuxei8_v_u16m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -3562,7 +3266,6 @@ void test_vsuxei8_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint8m2_t bindex,
   return vsuxei8_v_u16m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
@@ -3574,7 +3277,6 @@ void test_vsuxei8_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint8m4_t bindex,
   return vsuxei8_v_u16m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -3587,7 +3289,6 @@ void test_vsuxei16_v_u16mf4_m(vbool64_t mask, uint16_t *base,
   return vsuxei16_v_u16mf4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -3600,7 +3301,6 @@ void test_vsuxei16_v_u16mf2_m(vbool32_t mask, uint16_t *base,
   return vsuxei16_v_u16mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -3612,7 +3312,6 @@ void test_vsuxei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex,
   return vsuxei16_v_u16m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -3624,7 +3323,6 @@ void test_vsuxei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex,
   return vsuxei16_v_u16m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -3636,7 +3334,6 @@ void test_vsuxei16_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint16m4_t bindex,
   return vsuxei16_v_u16m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
@@ -3648,7 +3345,6 @@ void test_vsuxei16_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint16m8_t bindex,
   return vsuxei16_v_u16m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -3661,7 +3357,6 @@ void test_vsuxei32_v_u16mf4_m(vbool64_t mask, uint16_t *base,
   return vsuxei32_v_u16mf4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -3674,7 +3369,6 @@ void test_vsuxei32_v_u16mf2_m(vbool32_t mask, uint16_t *base,
   return vsuxei32_v_u16mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -3686,7 +3380,6 @@ void test_vsuxei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex,
   return vsuxei32_v_u16m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -3698,7 +3391,6 @@ void test_vsuxei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex,
   return vsuxei32_v_u16m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -3710,7 +3402,6 @@ void test_vsuxei32_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint32m8_t bindex,
   return vsuxei32_v_u16m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -3723,7 +3414,6 @@ void test_vsuxei64_v_u16mf4_m(vbool64_t mask, uint16_t *base,
   return vsuxei64_v_u16mf4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -3736,7 +3426,6 @@ void test_vsuxei64_v_u16mf2_m(vbool32_t mask, uint16_t *base,
   return vsuxei64_v_u16mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -3748,7 +3437,6 @@ void test_vsuxei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex,
   return vsuxei64_v_u16m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -3760,7 +3448,6 @@ void test_vsuxei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex,
   return vsuxei64_v_u16m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -3772,7 +3459,6 @@ void test_vsuxei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex,
   return vsuxei8_v_u32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -3784,7 +3470,6 @@ void test_vsuxei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex,
   return vsuxei8_v_u32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -3796,7 +3481,6 @@ void test_vsuxei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex,
   return vsuxei8_v_u32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -3808,7 +3492,6 @@ void test_vsuxei8_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex,
   return vsuxei8_v_u32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -3820,7 +3503,6 @@ void test_vsuxei8_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint8m2_t bindex,
   return vsuxei8_v_u32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -3833,7 +3515,6 @@ void test_vsuxei16_v_u32mf2_m(vbool64_t mask, uint32_t *base,
   return vsuxei16_v_u32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -3846,7 +3527,6 @@ void test_vsuxei16_v_u32m1_m(vbool32_t mask, uint32_t *base,
   return vsuxei16_v_u32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -3858,7 +3538,6 @@ void test_vsuxei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex,
   return vsuxei16_v_u32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -3870,7 +3549,6 @@ void test_vsuxei16_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint16m2_t bindex,
   return vsuxei16_v_u32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -3882,7 +3560,6 @@ void test_vsuxei16_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint16m4_t bindex,
   return vsuxei16_v_u32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -3895,7 +3572,6 @@ void test_vsuxei32_v_u32mf2_m(vbool64_t mask, uint32_t *base,
   return vsuxei32_v_u32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -3907,7 +3583,6 @@ void test_vsuxei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex,
   return vsuxei32_v_u32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -3919,7 +3594,6 @@ void test_vsuxei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex,
   return vsuxei32_v_u32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -3931,7 +3605,6 @@ void test_vsuxei32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex,
   return vsuxei32_v_u32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -3943,7 +3616,6 @@ void test_vsuxei32_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint32m8_t bindex,
   return vsuxei32_v_u32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -3956,7 +3628,6 @@ void test_vsuxei64_v_u32mf2_m(vbool64_t mask, uint32_t *base,
   return vsuxei64_v_u32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -3968,7 +3639,6 @@ void test_vsuxei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex,
   return vsuxei64_v_u32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -3980,7 +3650,6 @@ void test_vsuxei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex,
   return vsuxei64_v_u32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -3992,7 +3661,6 @@ void test_vsuxei64_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint64m8_t bindex,
   return vsuxei64_v_u32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -4004,7 +3672,6 @@ void test_vsuxei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex,
   return vsuxei8_v_u64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -4016,7 +3683,6 @@ void test_vsuxei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex,
   return vsuxei8_v_u64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -4028,7 +3694,6 @@ void test_vsuxei8_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint8mf2_t bindex,
   return vsuxei8_v_u64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -4040,7 +3705,6 @@ void test_vsuxei8_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint8m1_t bindex,
   return vsuxei8_v_u64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -4053,7 +3717,6 @@ void test_vsuxei16_v_u64m1_m(vbool64_t mask, uint64_t *base,
   return vsuxei16_v_u64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -4066,7 +3729,6 @@ void test_vsuxei16_v_u64m2_m(vbool32_t mask, uint64_t *base,
   return vsuxei16_v_u64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -4078,7 +3740,6 @@ void test_vsuxei16_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint16m1_t bindex,
   return vsuxei16_v_u64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -4090,7 +3751,6 @@ void test_vsuxei16_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint16m2_t bindex,
   return vsuxei16_v_u64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -4103,7 +3763,6 @@ void test_vsuxei32_v_u64m1_m(vbool64_t mask, uint64_t *base,
   return vsuxei32_v_u64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -4115,7 +3774,6 @@ void test_vsuxei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex,
   return vsuxei32_v_u64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -4127,7 +3785,6 @@ void test_vsuxei32_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex,
   return vsuxei32_v_u64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -4139,7 +3796,6 @@ void test_vsuxei32_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint32m4_t bindex,
   return vsuxei32_v_u64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -4151,7 +3807,6 @@ void test_vsuxei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex,
   return vsuxei64_v_u64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -4163,7 +3818,6 @@ void test_vsuxei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex,
   return vsuxei64_v_u64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -4175,7 +3829,6 @@ void test_vsuxei64_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint64m4_t bindex,
   return vsuxei64_v_u64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -4187,7 +3840,6 @@ void test_vsuxei64_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint64m8_t bindex,
   return vsuxei64_v_u64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -4199,7 +3851,6 @@ void test_vsuxei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex,
   return vsuxei8_v_f32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -4211,7 +3862,6 @@ void test_vsuxei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex,
   return vsuxei8_v_f32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -4223,7 +3873,6 @@ void test_vsuxei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex,
   return vsuxei8_v_f32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -4235,7 +3884,6 @@ void test_vsuxei8_v_f32m4_m(vbool8_t mask, float *base, vuint8m1_t bindex,
   return vsuxei8_v_f32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
@@ -4247,7 +3895,6 @@ void test_vsuxei8_v_f32m8_m(vbool4_t mask, float *base, vuint8m2_t bindex,
   return vsuxei8_v_f32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -4259,7 +3906,6 @@ void test_vsuxei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex,
   return vsuxei16_v_f32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -4271,7 +3917,6 @@ void test_vsuxei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex,
   return vsuxei16_v_f32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -4283,7 +3928,6 @@ void test_vsuxei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex,
   return vsuxei16_v_f32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -4295,7 +3939,6 @@ void test_vsuxei16_v_f32m4_m(vbool8_t mask, float *base, vuint16m2_t bindex,
   return vsuxei16_v_f32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
@@ -4307,7 +3950,6 @@ void test_vsuxei16_v_f32m8_m(vbool4_t mask, float *base, vuint16m4_t bindex,
   return vsuxei16_v_f32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -4319,7 +3961,6 @@ void test_vsuxei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex,
   return vsuxei32_v_f32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -4331,7 +3972,6 @@ void test_vsuxei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex,
   return vsuxei32_v_f32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -4343,7 +3983,6 @@ void test_vsuxei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex,
   return vsuxei32_v_f32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -4355,7 +3994,6 @@ void test_vsuxei32_v_f32m4_m(vbool8_t mask, float *base, vuint32m4_t bindex,
   return vsuxei32_v_f32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
@@ -4367,7 +4005,6 @@ void test_vsuxei32_v_f32m8_m(vbool4_t mask, float *base, vuint32m8_t bindex,
   return vsuxei32_v_f32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -4379,7 +4016,6 @@ void test_vsuxei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex,
   return vsuxei64_v_f32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -4391,7 +4027,6 @@ void test_vsuxei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex,
   return vsuxei64_v_f32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -4403,7 +4038,6 @@ void test_vsuxei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex,
   return vsuxei64_v_f32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -4415,7 +4049,6 @@ void test_vsuxei64_v_f32m4_m(vbool8_t mask, float *base, vuint64m8_t bindex,
   return vsuxei64_v_f32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -4427,7 +4060,6 @@ void test_vsuxei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex,
   return vsuxei8_v_f64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -4439,7 +4071,6 @@ void test_vsuxei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex,
   return vsuxei8_v_f64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -4451,7 +4082,6 @@ void test_vsuxei8_v_f64m4_m(vbool16_t mask, double *base, vuint8mf2_t bindex,
   return vsuxei8_v_f64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
@@ -4463,7 +4093,6 @@ void test_vsuxei8_v_f64m8_m(vbool8_t mask, double *base, vuint8m1_t bindex,
   return vsuxei8_v_f64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -4475,7 +4104,6 @@ void test_vsuxei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex,
   return vsuxei16_v_f64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -4487,7 +4115,6 @@ void test_vsuxei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bindex,
   return vsuxei16_v_f64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -4499,7 +4126,6 @@ void test_vsuxei16_v_f64m4_m(vbool16_t mask, double *base, vuint16m1_t bindex,
   return vsuxei16_v_f64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
@@ -4511,7 +4137,6 @@ void test_vsuxei16_v_f64m8_m(vbool8_t mask, double *base, vuint16m2_t bindex,
   return vsuxei16_v_f64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -4523,7 +4148,6 @@ void test_vsuxei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex,
   return vsuxei32_v_f64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -4535,7 +4159,6 @@ void test_vsuxei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t bindex,
   return vsuxei32_v_f64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -4547,7 +4170,6 @@ void test_vsuxei32_v_f64m4_m(vbool16_t mask, double *base, vuint32m2_t bindex,
   return vsuxei32_v_f64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
@@ -4559,7 +4181,6 @@ void test_vsuxei32_v_f64m8_m(vbool8_t mask, double *base, vuint32m4_t bindex,
   return vsuxei32_v_f64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -4571,7 +4192,6 @@ void test_vsuxei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex,
   return vsuxei64_v_f64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -4583,7 +4203,6 @@ void test_vsuxei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t bindex,
   return vsuxei64_v_f64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -4595,7 +4214,6 @@ void test_vsuxei64_v_f64m4_m(vbool16_t mask, double *base, vuint64m4_t bindex,
   return vsuxei64_v_f64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vundefined.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vundefined.c
index 396040d07b327..503849942e5e4 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vundefined.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vundefined.c
@@ -5,371 +5,318 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vundefined_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> undef
 //
 vint8mf8_t test_vundefined_i8mf8() { return vundefined_i8mf8(); }
 
-//
 // CHECK-RV64-LABEL: @test_vundefined_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> undef
 //
 vint8mf4_t test_vundefined_i8mf4() { return vundefined_i8mf4(); }
 
-//
 // CHECK-RV64-LABEL: @test_vundefined_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> undef
 //
 vint8mf2_t test_vundefined_i8mf2() { return vundefined_i8mf2(); }
 
-//
 // CHECK-RV64-LABEL: @test_vundefined_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> undef
 //
 vint8m1_t test_vundefined_i8m1() { return vundefined_i8m1(); }
 
-//
 // CHECK-RV64-LABEL: @test_vundefined_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> undef
 //
 vint8m2_t test_vundefined_i8m2() { return vundefined_i8m2(); }
 
-//
 // CHECK-RV64-LABEL: @test_vundefined_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> undef
 //
 vint8m4_t test_vundefined_i8m4() { return vundefined_i8m4(); }
 
-//
 // CHECK-RV64-LABEL: @test_vundefined_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> undef
 //
 vint8m8_t test_vundefined_i8m8() { return vundefined_i8m8(); }
 
-//
 // CHECK-RV64-LABEL: @test_vundefined_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> undef
 //
 vint16mf4_t test_vundefined_i16mf4() { return vundefined_i16mf4(); }
 
-//
 // CHECK-RV64-LABEL: @test_vundefined_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> undef
 //
 vint16mf2_t test_vundefined_i16mf2() { return vundefined_i16mf2(); }
 
-//
 // CHECK-RV64-LABEL: @test_vundefined_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> undef
 //
 vint16m1_t test_vundefined_i16m1() { return vundefined_i16m1(); }
 
-//
 // CHECK-RV64-LABEL: @test_vundefined_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> undef
 //
 vint16m2_t test_vundefined_i16m2() { return vundefined_i16m2(); }
 
-//
 // CHECK-RV64-LABEL: @test_vundefined_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> undef
 //
 vint16m4_t test_vundefined_i16m4() { return vundefined_i16m4(); }
 
-//
 // CHECK-RV64-LABEL: @test_vundefined_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> undef
 //
 vint16m8_t test_vundefined_i16m8() { return vundefined_i16m8(); }
 
-//
 // CHECK-RV64-LABEL: @test_vundefined_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> undef
 //
 vint32mf2_t test_vundefined_i32mf2() { return vundefined_i32mf2(); }
 
-//
 // CHECK-RV64-LABEL: @test_vundefined_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> undef
 //
 vint32m1_t test_vundefined_i32m1() { return vundefined_i32m1(); }
 
-//
 // CHECK-RV64-LABEL: @test_vundefined_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> undef
 //
 vint32m2_t test_vundefined_i32m2() { return vundefined_i32m2(); }
 
-//
 // CHECK-RV64-LABEL: @test_vundefined_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> undef
 //
 vint32m4_t test_vundefined_i32m4() { return vundefined_i32m4(); }
 
-//
 // CHECK-RV64-LABEL: @test_vundefined_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> undef
 //
 vint32m8_t test_vundefined_i32m8() { return vundefined_i32m8(); }
 
-//
 // CHECK-RV64-LABEL: @test_vundefined_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> undef
 //
 vint64m1_t test_vundefined_i64m1() { return vundefined_i64m1(); }
 
-//
 // CHECK-RV64-LABEL: @test_vundefined_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> undef
 //
 vint64m2_t test_vundefined_i64m2() { return vundefined_i64m2(); }
 
-//
 // CHECK-RV64-LABEL: @test_vundefined_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> undef
 //
 vint64m4_t test_vundefined_i64m4() { return vundefined_i64m4(); }
 
-//
 // CHECK-RV64-LABEL: @test_vundefined_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> undef
 //
 vint64m8_t test_vundefined_i64m8() { return vundefined_i64m8(); }
 
-//
 // CHECK-RV64-LABEL: @test_vundefined_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> undef
 //
 vuint8mf8_t test_vundefined_u8mf8() { return vundefined_u8mf8(); }
 
-//
 // CHECK-RV64-LABEL: @test_vundefined_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> undef
 //
 vuint8mf4_t test_vundefined_u8mf4() { return vundefined_u8mf4(); }
 
-//
 // CHECK-RV64-LABEL: @test_vundefined_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> undef
 //
 vuint8mf2_t test_vundefined_u8mf2() { return vundefined_u8mf2(); }
 
-//
 // CHECK-RV64-LABEL: @test_vundefined_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> undef
 //
 vuint8m1_t test_vundefined_u8m1() { return vundefined_u8m1(); }
 
-//
 // CHECK-RV64-LABEL: @test_vundefined_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> undef
 //
 vuint8m2_t test_vundefined_u8m2() { return vundefined_u8m2(); }
 
-//
 // CHECK-RV64-LABEL: @test_vundefined_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> undef
 //
 vuint8m4_t test_vundefined_u8m4() { return vundefined_u8m4(); }
 
-//
 // CHECK-RV64-LABEL: @test_vundefined_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> undef
 //
 vuint8m8_t test_vundefined_u8m8() { return vundefined_u8m8(); }
 
-//
 // CHECK-RV64-LABEL: @test_vundefined_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> undef
 //
 vuint16mf4_t test_vundefined_u16mf4() { return vundefined_u16mf4(); }
 
-//
 // CHECK-RV64-LABEL: @test_vundefined_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> undef
 //
 vuint16mf2_t test_vundefined_u16mf2() { return vundefined_u16mf2(); }
 
-//
 // CHECK-RV64-LABEL: @test_vundefined_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> undef
 //
 vuint16m1_t test_vundefined_u16m1() { return vundefined_u16m1(); }
 
-//
 // CHECK-RV64-LABEL: @test_vundefined_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> undef
 //
 vuint16m2_t test_vundefined_u16m2() { return vundefined_u16m2(); }
 
-//
 // CHECK-RV64-LABEL: @test_vundefined_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> undef
 //
 vuint16m4_t test_vundefined_u16m4() { return vundefined_u16m4(); }
 
-//
 // CHECK-RV64-LABEL: @test_vundefined_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> undef
 //
 vuint16m8_t test_vundefined_u16m8() { return vundefined_u16m8(); }
 
-//
 // CHECK-RV64-LABEL: @test_vundefined_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> undef
 //
 vuint32mf2_t test_vundefined_u32mf2() { return vundefined_u32mf2(); }
 
-//
 // CHECK-RV64-LABEL: @test_vundefined_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> undef
 //
 vuint32m1_t test_vundefined_u32m1() { return vundefined_u32m1(); }
 
-//
 // CHECK-RV64-LABEL: @test_vundefined_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> undef
 //
 vuint32m2_t test_vundefined_u32m2() { return vundefined_u32m2(); }
 
-//
 // CHECK-RV64-LABEL: @test_vundefined_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> undef
 //
 vuint32m4_t test_vundefined_u32m4() { return vundefined_u32m4(); }
 
-//
 // CHECK-RV64-LABEL: @test_vundefined_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> undef
 //
 vuint32m8_t test_vundefined_u32m8() { return vundefined_u32m8(); }
 
-//
 // CHECK-RV64-LABEL: @test_vundefined_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> undef
 //
 vuint64m1_t test_vundefined_u64m1() { return vundefined_u64m1(); }
 
-//
 // CHECK-RV64-LABEL: @test_vundefined_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> undef
 //
 vuint64m2_t test_vundefined_u64m2() { return vundefined_u64m2(); }
 
-//
 // CHECK-RV64-LABEL: @test_vundefined_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> undef
 //
 vuint64m4_t test_vundefined_u64m4() { return vundefined_u64m4(); }
 
-//
 // CHECK-RV64-LABEL: @test_vundefined_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> undef
 //
 vuint64m8_t test_vundefined_u64m8() { return vundefined_u64m8(); }
 
-//
 // CHECK-RV64-LABEL: @test_vundefined_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> undef
 //
 vfloat32mf2_t test_vundefined_f32mf2() { return vundefined_f32mf2(); }
 
-//
 // CHECK-RV64-LABEL: @test_vundefined_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> undef
 //
 vfloat32m1_t test_vundefined_f32m1() { return vundefined_f32m1(); }
 
-//
 // CHECK-RV64-LABEL: @test_vundefined_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> undef
 //
 vfloat32m2_t test_vundefined_f32m2() { return vundefined_f32m2(); }
 
-//
 // CHECK-RV64-LABEL: @test_vundefined_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> undef
 //
 vfloat32m4_t test_vundefined_f32m4() { return vundefined_f32m4(); }
 
-//
 // CHECK-RV64-LABEL: @test_vundefined_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> undef
 //
 vfloat32m8_t test_vundefined_f32m8() { return vundefined_f32m8(); }
 
-//
 // CHECK-RV64-LABEL: @test_vundefined_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> undef
 //
 vfloat64m1_t test_vundefined_f64m1() { return vundefined_f64m1(); }
 
-//
 // CHECK-RV64-LABEL: @test_vundefined_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> undef
 //
 vfloat64m2_t test_vundefined_f64m2() { return vundefined_f64m2(); }
 
-//
 // CHECK-RV64-LABEL: @test_vundefined_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> undef
 //
 vfloat64m4_t test_vundefined_f64m4() { return vundefined_f64m4(); }
 
-//
 // CHECK-RV64-LABEL: @test_vundefined_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> undef

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwadd.c
index 37ffc4b6f3365..db9f710d571f3 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwadd.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint16mf4_t test_vwadd_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
   return vwadd_vv_i16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint16mf4_t test_vwadd_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) {
   return vwadd_vx_i16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint16mf4_t test_vwadd_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
   return vwadd_wv_i16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.i8.i64(<vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint16mf4_t test_vwadd_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) {
   return vwadd_wx_i16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint16mf2_t test_vwadd_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
   return vwadd_vv_i16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint16mf2_t test_vwadd_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) {
   return vwadd_vx_i16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vint16mf2_t test_vwadd_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
   return vwadd_wv_i16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.i8.i64(<vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vint16mf2_t test_vwadd_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) {
   return vwadd_wx_i16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vint16m1_t test_vwadd_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
   return vwadd_vv_i16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vint16m1_t test_vwadd_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) {
   return vwadd_vx_i16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vint16m1_t test_vwadd_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) {
   return vwadd_wv_i16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.i8.i64(<vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vint16m1_t test_vwadd_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) {
   return vwadd_wx_i16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vint16m2_t test_vwadd_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) {
   return vwadd_vv_i16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vint16m2_t test_vwadd_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) {
   return vwadd_vx_i16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vint16m2_t test_vwadd_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) {
   return vwadd_wv_i16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.i8.i64(<vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vint16m2_t test_vwadd_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) {
   return vwadd_wx_i16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vint16m4_t test_vwadd_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
   return vwadd_vv_i16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vint16m4_t test_vwadd_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) {
   return vwadd_vx_i16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vint16m4_t test_vwadd_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) {
   return vwadd_wv_i16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.i8.i64(<vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vint16m4_t test_vwadd_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) {
   return vwadd_wx_i16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vint16m8_t test_vwadd_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) {
   return vwadd_vv_i16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vint16m8_t test_vwadd_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) {
   return vwadd_vx_i16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vint16m8_t test_vwadd_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) {
   return vwadd_wv_i16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.i8.i64(<vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vint16m8_t test_vwadd_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) {
   return vwadd_wx_i16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vint32mf2_t test_vwadd_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
   return vwadd_vv_i32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vint32mf2_t test_vwadd_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) {
   return vwadd_vx_i32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vint32mf2_t test_vwadd_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) {
   return vwadd_wv_i32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.i16.i64(<vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vint32mf2_t test_vwadd_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) {
   return vwadd_wx_i32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vint32m1_t test_vwadd_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
   return vwadd_vv_i32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -304,7 +274,6 @@ vint32m1_t test_vwadd_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) {
   return vwadd_vx_i32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -314,7 +283,6 @@ vint32m1_t test_vwadd_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) {
   return vwadd_wv_i32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.i16.i64(<vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -324,7 +292,6 @@ vint32m1_t test_vwadd_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) {
   return vwadd_wx_i32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -334,7 +301,6 @@ vint32m2_t test_vwadd_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) {
   return vwadd_vv_i32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -344,7 +310,6 @@ vint32m2_t test_vwadd_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) {
   return vwadd_vx_i32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -354,7 +319,6 @@ vint32m2_t test_vwadd_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) {
   return vwadd_wv_i32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.i16.i64(<vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -364,7 +328,6 @@ vint32m2_t test_vwadd_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) {
   return vwadd_wx_i32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -374,7 +337,6 @@ vint32m4_t test_vwadd_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) {
   return vwadd_vv_i32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -384,7 +346,6 @@ vint32m4_t test_vwadd_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) {
   return vwadd_vx_i32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -394,7 +355,6 @@ vint32m4_t test_vwadd_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) {
   return vwadd_wv_i32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.i16.i64(<vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -404,7 +364,6 @@ vint32m4_t test_vwadd_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) {
   return vwadd_wx_i32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -414,7 +373,6 @@ vint32m8_t test_vwadd_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) {
   return vwadd_vv_i32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -424,7 +382,6 @@ vint32m8_t test_vwadd_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) {
   return vwadd_vx_i32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -434,7 +391,6 @@ vint32m8_t test_vwadd_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) {
   return vwadd_wv_i32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.i16.i64(<vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -444,7 +400,6 @@ vint32m8_t test_vwadd_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) {
   return vwadd_wx_i32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -454,7 +409,6 @@ vint64m1_t test_vwadd_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
   return vwadd_vv_i64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -464,7 +418,6 @@ vint64m1_t test_vwadd_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) {
   return vwadd_vx_i64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -474,7 +427,6 @@ vint64m1_t test_vwadd_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) {
   return vwadd_wv_i64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.i32.i64(<vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -484,7 +436,6 @@ vint64m1_t test_vwadd_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) {
   return vwadd_wx_i64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -494,7 +445,6 @@ vint64m2_t test_vwadd_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) {
   return vwadd_vv_i64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -504,7 +454,6 @@ vint64m2_t test_vwadd_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) {
   return vwadd_vx_i64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -514,7 +463,6 @@ vint64m2_t test_vwadd_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) {
   return vwadd_wv_i64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.i32.i64(<vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -524,7 +472,6 @@ vint64m2_t test_vwadd_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) {
   return vwadd_wx_i64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -534,7 +481,6 @@ vint64m4_t test_vwadd_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) {
   return vwadd_vv_i64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -544,7 +490,6 @@ vint64m4_t test_vwadd_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) {
   return vwadd_vx_i64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -554,7 +499,6 @@ vint64m4_t test_vwadd_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) {
   return vwadd_wv_i64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.i32.i64(<vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -564,7 +508,6 @@ vint64m4_t test_vwadd_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) {
   return vwadd_wx_i64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -574,7 +517,6 @@ vint64m8_t test_vwadd_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
   return vwadd_vv_i64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -584,7 +526,6 @@ vint64m8_t test_vwadd_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) {
   return vwadd_vx_i64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -594,7 +535,6 @@ vint64m8_t test_vwadd_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) {
   return vwadd_wv_i64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.i32.i64(<vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -604,7 +544,6 @@ vint64m8_t test_vwadd_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) {
   return vwadd_wx_i64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -615,7 +554,6 @@ vuint16mf4_t test_vwaddu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2,
   return vwaddu_vv_u16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -625,7 +563,6 @@ vuint16mf4_t test_vwaddu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) {
   return vwaddu_vx_u16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -636,7 +573,6 @@ vuint16mf4_t test_vwaddu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2,
   return vwaddu_wv_u16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.i8.i64(<vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -646,7 +582,6 @@ vuint16mf4_t test_vwaddu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) {
   return vwaddu_wx_u16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -657,7 +592,6 @@ vuint16mf2_t test_vwaddu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2,
   return vwaddu_vv_u16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -667,7 +601,6 @@ vuint16mf2_t test_vwaddu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) {
   return vwaddu_vx_u16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -678,7 +611,6 @@ vuint16mf2_t test_vwaddu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2,
   return vwaddu_wv_u16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.i8.i64(<vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -688,7 +620,6 @@ vuint16mf2_t test_vwaddu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) {
   return vwaddu_wx_u16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -698,7 +629,6 @@ vuint16m1_t test_vwaddu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
   return vwaddu_vv_u16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -708,7 +638,6 @@ vuint16m1_t test_vwaddu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) {
   return vwaddu_vx_u16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -718,7 +647,6 @@ vuint16m1_t test_vwaddu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) {
   return vwaddu_wv_u16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.i8.i64(<vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -728,7 +656,6 @@ vuint16m1_t test_vwaddu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) {
   return vwaddu_wx_u16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -738,7 +665,6 @@ vuint16m2_t test_vwaddu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vwaddu_vv_u16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -748,7 +674,6 @@ vuint16m2_t test_vwaddu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) {
   return vwaddu_vx_u16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -758,7 +683,6 @@ vuint16m2_t test_vwaddu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) {
   return vwaddu_wv_u16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.i8.i64(<vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -768,7 +692,6 @@ vuint16m2_t test_vwaddu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) {
   return vwaddu_wx_u16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -778,7 +701,6 @@ vuint16m4_t test_vwaddu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vwaddu_vv_u16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -788,7 +710,6 @@ vuint16m4_t test_vwaddu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) {
   return vwaddu_vx_u16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -798,7 +719,6 @@ vuint16m4_t test_vwaddu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) {
   return vwaddu_wv_u16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.i8.i64(<vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -808,7 +728,6 @@ vuint16m4_t test_vwaddu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) {
   return vwaddu_wx_u16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -818,7 +737,6 @@ vuint16m8_t test_vwaddu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vwaddu_vv_u16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -828,7 +746,6 @@ vuint16m8_t test_vwaddu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) {
   return vwaddu_vx_u16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -838,7 +755,6 @@ vuint16m8_t test_vwaddu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) {
   return vwaddu_wv_u16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.i8.i64(<vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -848,7 +764,6 @@ vuint16m8_t test_vwaddu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) {
   return vwaddu_wx_u16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -859,7 +774,6 @@ vuint32mf2_t test_vwaddu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2,
   return vwaddu_vv_u32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -869,7 +783,6 @@ vuint32mf2_t test_vwaddu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) {
   return vwaddu_vx_u32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -880,7 +793,6 @@ vuint32mf2_t test_vwaddu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2,
   return vwaddu_wv_u32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.i16.i64(<vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -890,7 +802,6 @@ vuint32mf2_t test_vwaddu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) {
   return vwaddu_wx_u32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -901,7 +812,6 @@ vuint32m1_t test_vwaddu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2,
   return vwaddu_vv_u32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -911,7 +821,6 @@ vuint32m1_t test_vwaddu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) {
   return vwaddu_vx_u32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -921,7 +830,6 @@ vuint32m1_t test_vwaddu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) {
   return vwaddu_wv_u32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.i16.i64(<vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -931,7 +839,6 @@ vuint32m1_t test_vwaddu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) {
   return vwaddu_wx_u32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -941,7 +848,6 @@ vuint32m2_t test_vwaddu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
   return vwaddu_vv_u32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -951,7 +857,6 @@ vuint32m2_t test_vwaddu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) {
   return vwaddu_vx_u32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -961,7 +866,6 @@ vuint32m2_t test_vwaddu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) {
   return vwaddu_wv_u32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.i16.i64(<vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -971,7 +875,6 @@ vuint32m2_t test_vwaddu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) {
   return vwaddu_wx_u32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -981,7 +884,6 @@ vuint32m4_t test_vwaddu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vwaddu_vv_u32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -991,7 +893,6 @@ vuint32m4_t test_vwaddu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) {
   return vwaddu_vx_u32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1001,7 +902,6 @@ vuint32m4_t test_vwaddu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) {
   return vwaddu_wv_u32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.i16.i64(<vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1011,7 +911,6 @@ vuint32m4_t test_vwaddu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) {
   return vwaddu_wx_u32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1021,7 +920,6 @@ vuint32m8_t test_vwaddu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vwaddu_vv_u32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1031,7 +929,6 @@ vuint32m8_t test_vwaddu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) {
   return vwaddu_vx_u32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1041,7 +938,6 @@ vuint32m8_t test_vwaddu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) {
   return vwaddu_wv_u32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.i16.i64(<vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1051,7 +947,6 @@ vuint32m8_t test_vwaddu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) {
   return vwaddu_wx_u32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1062,7 +957,6 @@ vuint64m1_t test_vwaddu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2,
   return vwaddu_vv_u64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1072,7 +966,6 @@ vuint64m1_t test_vwaddu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) {
   return vwaddu_vx_u64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1082,7 +975,6 @@ vuint64m1_t test_vwaddu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
   return vwaddu_wv_u64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.i32.i64(<vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1092,7 +984,6 @@ vuint64m1_t test_vwaddu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) {
   return vwaddu_wx_u64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1102,7 +993,6 @@ vuint64m2_t test_vwaddu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
   return vwaddu_vv_u64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1112,7 +1002,6 @@ vuint64m2_t test_vwaddu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) {
   return vwaddu_vx_u64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1122,7 +1011,6 @@ vuint64m2_t test_vwaddu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) {
   return vwaddu_wv_u64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.nxv2i64.i32.i64(<vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1132,7 +1020,6 @@ vuint64m2_t test_vwaddu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) {
   return vwaddu_wx_u64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1142,7 +1029,6 @@ vuint64m4_t test_vwaddu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
   return vwaddu_vv_u64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1152,7 +1038,6 @@ vuint64m4_t test_vwaddu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vwaddu_vx_u64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1162,7 +1047,6 @@ vuint64m4_t test_vwaddu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) {
   return vwaddu_wv_u64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.i32.i64(<vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1172,7 +1056,6 @@ vuint64m4_t test_vwaddu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) {
   return vwaddu_wx_u64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1182,7 +1065,6 @@ vuint64m8_t test_vwaddu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vwaddu_vv_u64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1192,7 +1074,6 @@ vuint64m8_t test_vwaddu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vwaddu_vx_u64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1202,7 +1083,6 @@ vuint64m8_t test_vwaddu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) {
   return vwaddu_wv_u64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.nxv8i64.i32.i64(<vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1212,7 +1092,6 @@ vuint64m8_t test_vwaddu_wx_u64m8(vuint64m8_t op1, uint32_t op2, size_t vl) {
   return vwaddu_wx_u64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1223,7 +1102,6 @@ vint16mf4_t test_vwadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
   return vwadd_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1234,7 +1112,6 @@ vint16mf4_t test_vwadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
   return vwadd_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1245,7 +1122,6 @@ vint16mf4_t test_vwadd_wv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
   return vwadd_wv_i16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1256,7 +1132,6 @@ vint16mf4_t test_vwadd_wx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
   return vwadd_wx_i16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1267,7 +1142,6 @@ vint16mf2_t test_vwadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
   return vwadd_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1278,7 +1152,6 @@ vint16mf2_t test_vwadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
   return vwadd_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1289,7 +1162,6 @@ vint16mf2_t test_vwadd_wv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
   return vwadd_wv_i16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1300,7 +1172,6 @@ vint16mf2_t test_vwadd_wx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
   return vwadd_wx_i16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1311,7 +1182,6 @@ vint16m1_t test_vwadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
   return vwadd_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1322,7 +1192,6 @@ vint16m1_t test_vwadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
   return vwadd_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1333,7 +1202,6 @@ vint16m1_t test_vwadd_wv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
   return vwadd_wv_i16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1344,7 +1212,6 @@ vint16m1_t test_vwadd_wx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
   return vwadd_wx_i16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1355,7 +1222,6 @@ vint16m2_t test_vwadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
   return vwadd_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1366,7 +1232,6 @@ vint16m2_t test_vwadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
   return vwadd_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1377,7 +1242,6 @@ vint16m2_t test_vwadd_wv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
   return vwadd_wv_i16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1388,7 +1252,6 @@ vint16m2_t test_vwadd_wx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
   return vwadd_wx_i16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1399,7 +1262,6 @@ vint16m4_t test_vwadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
   return vwadd_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1410,7 +1272,6 @@ vint16m4_t test_vwadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
   return vwadd_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1421,7 +1282,6 @@ vint16m4_t test_vwadd_wv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
   return vwadd_wv_i16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1432,7 +1292,6 @@ vint16m4_t test_vwadd_wx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
   return vwadd_wx_i16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1443,7 +1302,6 @@ vint16m8_t test_vwadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
   return vwadd_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1454,7 +1312,6 @@ vint16m8_t test_vwadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
   return vwadd_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1465,7 +1322,6 @@ vint16m8_t test_vwadd_wv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
   return vwadd_wv_i16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1476,7 +1332,6 @@ vint16m8_t test_vwadd_wx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
   return vwadd_wx_i16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1488,7 +1343,6 @@ vint32mf2_t test_vwadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
   return vwadd_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1499,7 +1353,6 @@ vint32mf2_t test_vwadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
   return vwadd_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1511,7 +1364,6 @@ vint32mf2_t test_vwadd_wv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
   return vwadd_wv_i32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1522,7 +1374,6 @@ vint32mf2_t test_vwadd_wx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
   return vwadd_wx_i32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1533,7 +1384,6 @@ vint32m1_t test_vwadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
   return vwadd_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1544,7 +1394,6 @@ vint32m1_t test_vwadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
   return vwadd_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1555,7 +1404,6 @@ vint32m1_t test_vwadd_wv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
   return vwadd_wv_i32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1566,7 +1414,6 @@ vint32m1_t test_vwadd_wx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
   return vwadd_wx_i32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1577,7 +1424,6 @@ vint32m2_t test_vwadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
   return vwadd_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1588,7 +1434,6 @@ vint32m2_t test_vwadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
   return vwadd_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1599,7 +1444,6 @@ vint32m2_t test_vwadd_wv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
   return vwadd_wv_i32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1610,7 +1454,6 @@ vint32m2_t test_vwadd_wx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
   return vwadd_wx_i32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1621,7 +1464,6 @@ vint32m4_t test_vwadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
   return vwadd_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1632,7 +1474,6 @@ vint32m4_t test_vwadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
   return vwadd_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1643,7 +1484,6 @@ vint32m4_t test_vwadd_wv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
   return vwadd_wv_i32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1654,7 +1494,6 @@ vint32m4_t test_vwadd_wx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
   return vwadd_wx_i32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1665,7 +1504,6 @@ vint32m8_t test_vwadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
   return vwadd_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1676,7 +1514,6 @@ vint32m8_t test_vwadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
   return vwadd_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1687,7 +1524,6 @@ vint32m8_t test_vwadd_wv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
   return vwadd_wv_i32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1698,7 +1534,6 @@ vint32m8_t test_vwadd_wx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
   return vwadd_wx_i32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1709,7 +1544,6 @@ vint64m1_t test_vwadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
   return vwadd_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1720,7 +1554,6 @@ vint64m1_t test_vwadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
   return vwadd_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1731,7 +1564,6 @@ vint64m1_t test_vwadd_wv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
   return vwadd_wv_i64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1742,7 +1574,6 @@ vint64m1_t test_vwadd_wx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
   return vwadd_wx_i64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1753,7 +1584,6 @@ vint64m2_t test_vwadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
   return vwadd_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1764,7 +1594,6 @@ vint64m2_t test_vwadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
   return vwadd_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1775,7 +1604,6 @@ vint64m2_t test_vwadd_wv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
   return vwadd_wv_i64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1786,7 +1614,6 @@ vint64m2_t test_vwadd_wx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
   return vwadd_wx_i64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1797,7 +1624,6 @@ vint64m4_t test_vwadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
   return vwadd_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1808,7 +1634,6 @@ vint64m4_t test_vwadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
   return vwadd_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1819,7 +1644,6 @@ vint64m4_t test_vwadd_wv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
   return vwadd_wv_i64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1830,7 +1654,6 @@ vint64m4_t test_vwadd_wx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
   return vwadd_wx_i64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1841,7 +1664,6 @@ vint64m8_t test_vwadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
   return vwadd_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1852,7 +1674,6 @@ vint64m8_t test_vwadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
   return vwadd_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1863,7 +1684,6 @@ vint64m8_t test_vwadd_wv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
   return vwadd_wv_i64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1874,7 +1694,6 @@ vint64m8_t test_vwadd_wx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
   return vwadd_wx_i64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1886,7 +1705,6 @@ vuint16mf4_t test_vwaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
   return vwaddu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1897,7 +1715,6 @@ vuint16mf4_t test_vwaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
   return vwaddu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1909,7 +1726,6 @@ vuint16mf4_t test_vwaddu_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
   return vwaddu_wv_u16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.mask.nxv1i16.i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1920,7 +1736,6 @@ vuint16mf4_t test_vwaddu_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
   return vwaddu_wx_u16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1932,7 +1747,6 @@ vuint16mf2_t test_vwaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
   return vwaddu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1943,7 +1757,6 @@ vuint16mf2_t test_vwaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
   return vwaddu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1955,7 +1768,6 @@ vuint16mf2_t test_vwaddu_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
   return vwaddu_wv_u16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.mask.nxv2i16.i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1966,7 +1778,6 @@ vuint16mf2_t test_vwaddu_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
   return vwaddu_wx_u16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1978,7 +1789,6 @@ vuint16m1_t test_vwaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
   return vwaddu_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1989,7 +1799,6 @@ vuint16m1_t test_vwaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
   return vwaddu_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2001,7 +1810,6 @@ vuint16m1_t test_vwaddu_wv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
   return vwaddu_wv_u16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.mask.nxv4i16.i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2012,7 +1820,6 @@ vuint16m1_t test_vwaddu_wx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
   return vwaddu_wx_u16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2023,7 +1830,6 @@ vuint16m2_t test_vwaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
   return vwaddu_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2034,7 +1840,6 @@ vuint16m2_t test_vwaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
   return vwaddu_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2045,7 +1850,6 @@ vuint16m2_t test_vwaddu_wv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
   return vwaddu_wv_u16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.mask.nxv8i16.i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2056,7 +1860,6 @@ vuint16m2_t test_vwaddu_wx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
   return vwaddu_wx_u16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2067,7 +1870,6 @@ vuint16m4_t test_vwaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
   return vwaddu_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2078,7 +1880,6 @@ vuint16m4_t test_vwaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
   return vwaddu_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2089,7 +1890,6 @@ vuint16m4_t test_vwaddu_wv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
   return vwaddu_wv_u16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.mask.nxv16i16.i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2100,7 +1900,6 @@ vuint16m4_t test_vwaddu_wx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
   return vwaddu_wx_u16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2111,7 +1910,6 @@ vuint16m8_t test_vwaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
   return vwaddu_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2122,7 +1920,6 @@ vuint16m8_t test_vwaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
   return vwaddu_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2133,7 +1930,6 @@ vuint16m8_t test_vwaddu_wv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
   return vwaddu_wv_u16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.mask.nxv32i16.i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2144,7 +1940,6 @@ vuint16m8_t test_vwaddu_wx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
   return vwaddu_wx_u16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2156,7 +1951,6 @@ vuint32mf2_t test_vwaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
   return vwaddu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2168,7 +1962,6 @@ vuint32mf2_t test_vwaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
   return vwaddu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2180,7 +1973,6 @@ vuint32mf2_t test_vwaddu_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
   return vwaddu_wv_u32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.mask.nxv1i32.i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2192,7 +1984,6 @@ vuint32mf2_t test_vwaddu_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
   return vwaddu_wx_u32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2204,7 +1995,6 @@ vuint32m1_t test_vwaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
   return vwaddu_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2215,7 +2005,6 @@ vuint32m1_t test_vwaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
   return vwaddu_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2227,7 +2016,6 @@ vuint32m1_t test_vwaddu_wv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
   return vwaddu_wv_u32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.mask.nxv2i32.i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2238,7 +2026,6 @@ vuint32m1_t test_vwaddu_wx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
   return vwaddu_wx_u32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2250,7 +2037,6 @@ vuint32m2_t test_vwaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
   return vwaddu_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2261,7 +2047,6 @@ vuint32m2_t test_vwaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
   return vwaddu_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2273,7 +2058,6 @@ vuint32m2_t test_vwaddu_wv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
   return vwaddu_wv_u32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.mask.nxv4i32.i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2284,7 +2068,6 @@ vuint32m2_t test_vwaddu_wx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
   return vwaddu_wx_u32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2296,7 +2079,6 @@ vuint32m4_t test_vwaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
   return vwaddu_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2307,7 +2089,6 @@ vuint32m4_t test_vwaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
   return vwaddu_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2319,7 +2100,6 @@ vuint32m4_t test_vwaddu_wv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
   return vwaddu_wv_u32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.mask.nxv8i32.i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2330,7 +2110,6 @@ vuint32m4_t test_vwaddu_wx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
   return vwaddu_wx_u32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2342,7 +2121,6 @@ vuint32m8_t test_vwaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
   return vwaddu_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2353,7 +2131,6 @@ vuint32m8_t test_vwaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
   return vwaddu_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2365,7 +2142,6 @@ vuint32m8_t test_vwaddu_wv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
   return vwaddu_wv_u32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.mask.nxv16i32.i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2376,7 +2152,6 @@ vuint32m8_t test_vwaddu_wx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
   return vwaddu_wx_u32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2388,7 +2163,6 @@ vuint64m1_t test_vwaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
   return vwaddu_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2399,7 +2173,6 @@ vuint64m1_t test_vwaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
   return vwaddu_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2411,7 +2184,6 @@ vuint64m1_t test_vwaddu_wv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
   return vwaddu_wv_u64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.mask.nxv1i64.i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2422,7 +2194,6 @@ vuint64m1_t test_vwaddu_wx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
   return vwaddu_wx_u64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2434,7 +2205,6 @@ vuint64m2_t test_vwaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
   return vwaddu_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2445,7 +2215,6 @@ vuint64m2_t test_vwaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
   return vwaddu_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2457,7 +2226,6 @@ vuint64m2_t test_vwaddu_wv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
   return vwaddu_wv_u64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.mask.nxv2i64.i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2468,7 +2236,6 @@ vuint64m2_t test_vwaddu_wx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
   return vwaddu_wx_u64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2480,7 +2247,6 @@ vuint64m4_t test_vwaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
   return vwaddu_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2491,7 +2257,6 @@ vuint64m4_t test_vwaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
   return vwaddu_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2503,7 +2268,6 @@ vuint64m4_t test_vwaddu_wv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
   return vwaddu_wv_u64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.mask.nxv4i64.i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2514,7 +2278,6 @@ vuint64m4_t test_vwaddu_wx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
   return vwaddu_wx_u64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2526,7 +2289,6 @@ vuint64m8_t test_vwaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
   return vwaddu_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2537,7 +2299,6 @@ vuint64m8_t test_vwaddu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
   return vwaddu_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2549,7 +2310,6 @@ vuint64m8_t test_vwaddu_wv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
   return vwaddu_wv_u64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.mask.nxv8i64.i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwcvt.c
index 32f17317f524b..07e79e9db52a6 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwcvt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwcvt.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint16mf4_t test_vwcvt_x_x_v_i16mf4 (vint8mf8_t src, size_t vl) {
   return vwcvt_x_x_v_i16mf4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint16mf2_t test_vwcvt_x_x_v_i16mf2 (vint8mf4_t src, size_t vl) {
   return vwcvt_x_x_v_i16mf2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint16m1_t test_vwcvt_x_x_v_i16m1 (vint8mf2_t src, size_t vl) {
   return vwcvt_x_x_v_i16m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint16m2_t test_vwcvt_x_x_v_i16m2 (vint8m1_t src, size_t vl) {
   return vwcvt_x_x_v_i16m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint16m4_t test_vwcvt_x_x_v_i16m4 (vint8m2_t src, size_t vl) {
   return vwcvt_x_x_v_i16m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint16m8_t test_vwcvt_x_x_v_i16m8 (vint8m4_t src, size_t vl) {
   return vwcvt_x_x_v_i16m8(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vuint16mf4_t test_vwcvtu_x_x_v_u16mf4 (vuint8mf8_t src, size_t vl) {
   return vwcvtu_x_x_v_u16mf4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vuint16mf2_t test_vwcvtu_x_x_v_u16mf2 (vuint8mf4_t src, size_t vl) {
   return vwcvtu_x_x_v_u16mf2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vuint16m1_t test_vwcvtu_x_x_v_u16m1 (vuint8mf2_t src, size_t vl) {
   return vwcvtu_x_x_v_u16m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vuint16m2_t test_vwcvtu_x_x_v_u16m2 (vuint8m1_t src, size_t vl) {
   return vwcvtu_x_x_v_u16m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vuint16m4_t test_vwcvtu_x_x_v_u16m4 (vuint8m2_t src, size_t vl) {
   return vwcvtu_x_x_v_u16m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vuint16m8_t test_vwcvtu_x_x_v_u16m8 (vuint8m4_t src, size_t vl) {
   return vwcvtu_x_x_v_u16m8(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vint32mf2_t test_vwcvt_x_x_v_i32mf2 (vint16mf4_t src, size_t vl) {
   return vwcvt_x_x_v_i32mf2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vint32m1_t test_vwcvt_x_x_v_i32m1 (vint16mf2_t src, size_t vl) {
   return vwcvt_x_x_v_i32m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vint32m2_t test_vwcvt_x_x_v_i32m2 (vint16m1_t src, size_t vl) {
   return vwcvt_x_x_v_i32m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vint32m4_t test_vwcvt_x_x_v_i32m4 (vint16m2_t src, size_t vl) {
   return vwcvt_x_x_v_i32m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vint32m8_t test_vwcvt_x_x_v_i32m8 (vint16m4_t src, size_t vl) {
   return vwcvt_x_x_v_i32m8(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vuint32mf2_t test_vwcvtu_x_x_v_u32mf2 (vuint16mf4_t src, size_t vl) {
   return vwcvtu_x_x_v_u32mf2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vuint32m1_t test_vwcvtu_x_x_v_u32m1 (vuint16mf2_t src, size_t vl) {
   return vwcvtu_x_x_v_u32m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vuint32m2_t test_vwcvtu_x_x_v_u32m2 (vuint16m1_t src, size_t vl) {
   return vwcvtu_x_x_v_u32m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vuint32m4_t test_vwcvtu_x_x_v_u32m4 (vuint16m2_t src, size_t vl) {
   return vwcvtu_x_x_v_u32m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vuint32m8_t test_vwcvtu_x_x_v_u32m8 (vuint16m4_t src, size_t vl) {
   return vwcvtu_x_x_v_u32m8(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vint64m1_t test_vwcvt_x_x_v_i64m1 (vint32mf2_t src, size_t vl) {
   return vwcvt_x_x_v_i64m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vint64m2_t test_vwcvt_x_x_v_i64m2 (vint32m1_t src, size_t vl) {
   return vwcvt_x_x_v_i64m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vint64m4_t test_vwcvt_x_x_v_i64m4 (vint32m2_t src, size_t vl) {
   return vwcvt_x_x_v_i64m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vint64m8_t test_vwcvt_x_x_v_i64m8 (vint32m4_t src, size_t vl) {
   return vwcvt_x_x_v_i64m8(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vuint64m1_t test_vwcvtu_x_x_v_u64m1 (vuint32mf2_t src, size_t vl) {
   return vwcvtu_x_x_v_u64m1(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vuint64m2_t test_vwcvtu_x_x_v_u64m2 (vuint32m1_t src, size_t vl) {
   return vwcvtu_x_x_v_u64m2(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vuint64m4_t test_vwcvtu_x_x_v_u64m4 (vuint32m2_t src, size_t vl) {
   return vwcvtu_x_x_v_u64m4(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
@@ -304,7 +274,6 @@ vuint64m8_t test_vwcvtu_x_x_v_u64m8 (vuint32m4_t src, size_t vl) {
   return vwcvtu_x_x_v_u64m8(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i8 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -314,7 +283,6 @@ vint16mf4_t test_vwcvt_x_x_v_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vi
   return vwcvt_x_x_v_i16mf4_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i8 0, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -324,7 +292,6 @@ vint16mf2_t test_vwcvt_x_x_v_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vi
   return vwcvt_x_x_v_i16mf2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i8 0, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -334,7 +301,6 @@ vint16m1_t test_vwcvt_x_x_v_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint8
   return vwcvt_x_x_v_i16m1_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i8 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -344,7 +310,6 @@ vint16m2_t test_vwcvt_x_x_v_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint8m
   return vwcvt_x_x_v_i16m2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i8 0, <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -354,7 +319,6 @@ vint16m4_t test_vwcvt_x_x_v_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint8m
   return vwcvt_x_x_v_i16m4_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i8 0, <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -364,7 +328,6 @@ vint16m8_t test_vwcvt_x_x_v_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint8m
   return vwcvt_x_x_v_i16m8_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i8 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -374,7 +337,6 @@ vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff,
   return vwcvtu_x_x_v_u16mf4_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i8 0, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -384,7 +346,6 @@ vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff,
   return vwcvtu_x_x_v_u16mf2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i8 0, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -394,7 +355,6 @@ vuint16m1_t test_vwcvtu_x_x_v_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vu
   return vwcvtu_x_x_v_u16m1_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i8 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -404,7 +364,6 @@ vuint16m2_t test_vwcvtu_x_x_v_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vui
   return vwcvtu_x_x_v_u16m2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i8 0, <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -414,7 +373,6 @@ vuint16m4_t test_vwcvtu_x_x_v_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vui
   return vwcvtu_x_x_v_u16m4_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i8 0, <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -424,7 +382,6 @@ vuint16m8_t test_vwcvtu_x_x_v_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vui
   return vwcvtu_x_x_v_u16m8_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i16 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -434,7 +391,6 @@ vint32mf2_t test_vwcvt_x_x_v_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vi
   return vwcvt_x_x_v_i32mf2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i16 0, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -444,7 +400,6 @@ vint32m1_t test_vwcvt_x_x_v_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint1
   return vwcvt_x_x_v_i32m1_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i16 0, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -454,7 +409,6 @@ vint32m2_t test_vwcvt_x_x_v_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint1
   return vwcvt_x_x_v_i32m2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i16 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -464,7 +418,6 @@ vint32m4_t test_vwcvt_x_x_v_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint16
   return vwcvt_x_x_v_i32m4_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i16 0, <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -474,7 +427,6 @@ vint32m8_t test_vwcvt_x_x_v_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint16
   return vwcvt_x_x_v_i32m8_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i16 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -484,7 +436,6 @@ vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff,
   return vwcvtu_x_x_v_u32mf2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i16 0, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -494,7 +445,6 @@ vuint32m1_t test_vwcvtu_x_x_v_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vu
   return vwcvtu_x_x_v_u32m1_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i16 0, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -504,7 +454,6 @@ vuint32m2_t test_vwcvtu_x_x_v_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vu
   return vwcvtu_x_x_v_u32m2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i16 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -514,7 +463,6 @@ vuint32m4_t test_vwcvtu_x_x_v_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vui
   return vwcvtu_x_x_v_u32m4_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i16 0, <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -524,7 +472,6 @@ vuint32m8_t test_vwcvtu_x_x_v_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vui
   return vwcvtu_x_x_v_u32m8_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i32 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -534,7 +481,6 @@ vint64m1_t test_vwcvt_x_x_v_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint3
   return vwcvt_x_x_v_i64m1_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i32 0, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -544,7 +490,6 @@ vint64m2_t test_vwcvt_x_x_v_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint3
   return vwcvt_x_x_v_i64m2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i32 0, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -554,7 +499,6 @@ vint64m4_t test_vwcvt_x_x_v_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint3
   return vwcvt_x_x_v_i64m4_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i32 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -564,7 +508,6 @@ vint64m8_t test_vwcvt_x_x_v_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint32
   return vwcvt_x_x_v_i64m8_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i32 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -574,7 +517,6 @@ vuint64m1_t test_vwcvtu_x_x_v_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vu
   return vwcvtu_x_x_v_u64m1_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i32 0, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -584,7 +526,6 @@ vuint64m2_t test_vwcvtu_x_x_v_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vu
   return vwcvtu_x_x_v_u64m2_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i32 0, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -594,7 +535,6 @@ vuint64m4_t test_vwcvtu_x_x_v_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vu
   return vwcvtu_x_x_v_u64m4_m(mask, maskedoff, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i32 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
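
For context, the lines being deleted throughout these files are the bare "//" comment separators that preceded each CHECK-LABEL block; the tests themselves are unchanged. After the cleanup, a typical test in vwcvt.c reads roughly as sketched below. This is reconstructed from the hunks above, with the remaining CHECK context lines elided, so it is not a verbatim copy of the file:

    #include <riscv_vector.h>

    // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m2(
    // CHECK-RV64-NEXT:  entry:
    // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
    // (remaining CHECK lines elided)
    vint32m2_t test_vwcvt_x_x_v_i32m2 (vint16m1_t src, size_t vl) {
      return vwcvt_x_x_v_i32m2(src, vl);
    }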

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwmacc.c
index 56d6d5f2279ad..8c74789447bd0 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwmacc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwmacc.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vint16mf4_t test_vwmacc_vv_i16mf4(vint16mf4_t acc, vint8mf8_t op1,
   return vwmacc_vv_i16mf4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -26,7 +24,6 @@ vint16mf4_t test_vwmacc_vx_i16mf4(vint16mf4_t acc, int8_t op1, vint8mf8_t op2,
   return vwmacc_vx_i16mf4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmacc.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -37,7 +34,6 @@ vint16mf2_t test_vwmacc_vv_i16mf2(vint16mf2_t acc, vint8mf4_t op1,
   return vwmacc_vv_i16mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmacc.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -48,7 +44,6 @@ vint16mf2_t test_vwmacc_vx_i16mf2(vint16mf2_t acc, int8_t op1, vint8mf4_t op2,
   return vwmacc_vx_i16mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmacc.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -59,7 +54,6 @@ vint16m1_t test_vwmacc_vv_i16m1(vint16m1_t acc, vint8mf2_t op1, vint8mf2_t op2,
   return vwmacc_vv_i16m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmacc.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -70,7 +64,6 @@ vint16m1_t test_vwmacc_vx_i16m1(vint16m1_t acc, int8_t op1, vint8mf2_t op2,
   return vwmacc_vx_i16m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmacc.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -81,7 +74,6 @@ vint16m2_t test_vwmacc_vv_i16m2(vint16m2_t acc, vint8m1_t op1, vint8m1_t op2,
   return vwmacc_vv_i16m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmacc.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -92,7 +84,6 @@ vint16m2_t test_vwmacc_vx_i16m2(vint16m2_t acc, int8_t op1, vint8m1_t op2,
   return vwmacc_vx_i16m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmacc.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -103,7 +94,6 @@ vint16m4_t test_vwmacc_vv_i16m4(vint16m4_t acc, vint8m2_t op1, vint8m2_t op2,
   return vwmacc_vv_i16m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmacc.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +104,6 @@ vint16m4_t test_vwmacc_vx_i16m4(vint16m4_t acc, int8_t op1, vint8m2_t op2,
   return vwmacc_vx_i16m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmacc.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -125,7 +114,6 @@ vint16m8_t test_vwmacc_vv_i16m8(vint16m8_t acc, vint8m4_t op1, vint8m4_t op2,
   return vwmacc_vv_i16m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmacc.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -136,7 +124,6 @@ vint16m8_t test_vwmacc_vx_i16m8(vint16m8_t acc, int8_t op1, vint8m4_t op2,
   return vwmacc_vx_i16m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmacc.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -147,7 +134,6 @@ vint32mf2_t test_vwmacc_vv_i32mf2(vint32mf2_t acc, vint16mf4_t op1,
   return vwmacc_vv_i32mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmacc.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -158,7 +144,6 @@ vint32mf2_t test_vwmacc_vx_i32mf2(vint32mf2_t acc, int16_t op1, vint16mf4_t op2,
   return vwmacc_vx_i32mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmacc.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -169,7 +154,6 @@ vint32m1_t test_vwmacc_vv_i32m1(vint32m1_t acc, vint16mf2_t op1,
   return vwmacc_vv_i32m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmacc.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -180,7 +164,6 @@ vint32m1_t test_vwmacc_vx_i32m1(vint32m1_t acc, int16_t op1, vint16mf2_t op2,
   return vwmacc_vx_i32m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -191,7 +174,6 @@ vint32m2_t test_vwmacc_vv_i32m2(vint32m2_t acc, vint16m1_t op1, vint16m1_t op2,
   return vwmacc_vv_i32m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -202,7 +184,6 @@ vint32m2_t test_vwmacc_vx_i32m2(vint32m2_t acc, int16_t op1, vint16m1_t op2,
   return vwmacc_vx_i32m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmacc.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -213,7 +194,6 @@ vint32m4_t test_vwmacc_vv_i32m4(vint32m4_t acc, vint16m2_t op1, vint16m2_t op2,
   return vwmacc_vv_i32m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmacc.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -224,7 +204,6 @@ vint32m4_t test_vwmacc_vx_i32m4(vint32m4_t acc, int16_t op1, vint16m2_t op2,
   return vwmacc_vx_i32m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmacc.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -235,7 +214,6 @@ vint32m8_t test_vwmacc_vv_i32m8(vint32m8_t acc, vint16m4_t op1, vint16m4_t op2,
   return vwmacc_vv_i32m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmacc.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -246,7 +224,6 @@ vint32m8_t test_vwmacc_vx_i32m8(vint32m8_t acc, int16_t op1, vint16m4_t op2,
   return vwmacc_vx_i32m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -257,7 +234,6 @@ vint64m1_t test_vwmacc_vv_i64m1(vint64m1_t acc, vint32mf2_t op1,
   return vwmacc_vv_i64m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -268,7 +244,6 @@ vint64m1_t test_vwmacc_vx_i64m1(vint64m1_t acc, int32_t op1, vint32mf2_t op2,
   return vwmacc_vx_i64m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmacc.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -279,7 +254,6 @@ vint64m2_t test_vwmacc_vv_i64m2(vint64m2_t acc, vint32m1_t op1, vint32m1_t op2,
   return vwmacc_vv_i64m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmacc.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -290,7 +264,6 @@ vint64m2_t test_vwmacc_vx_i64m2(vint64m2_t acc, int32_t op1, vint32m1_t op2,
   return vwmacc_vx_i64m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmacc.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -301,7 +274,6 @@ vint64m4_t test_vwmacc_vv_i64m4(vint64m4_t acc, vint32m2_t op1, vint32m2_t op2,
   return vwmacc_vv_i64m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmacc.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -312,7 +284,6 @@ vint64m4_t test_vwmacc_vx_i64m4(vint64m4_t acc, int32_t op1, vint32m2_t op2,
   return vwmacc_vx_i64m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmacc.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -323,7 +294,6 @@ vint64m8_t test_vwmacc_vv_i64m8(vint64m8_t acc, vint32m4_t op1, vint32m4_t op2,
   return vwmacc_vv_i64m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmacc.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -334,7 +304,6 @@ vint64m8_t test_vwmacc_vx_i64m8(vint64m8_t acc, int32_t op1, vint32m4_t op2,
   return vwmacc_vx_i64m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -345,7 +314,6 @@ vuint16mf4_t test_vwmaccu_vv_u16mf4(vuint16mf4_t acc, vuint8mf8_t op1,
   return vwmaccu_vv_u16mf4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -356,7 +324,6 @@ vuint16mf4_t test_vwmaccu_vx_u16mf4(vuint16mf4_t acc, uint8_t op1,
   return vwmaccu_vx_u16mf4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -367,7 +334,6 @@ vuint16mf2_t test_vwmaccu_vv_u16mf2(vuint16mf2_t acc, vuint8mf4_t op1,
   return vwmaccu_vv_u16mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -378,7 +344,6 @@ vuint16mf2_t test_vwmaccu_vx_u16mf2(vuint16mf2_t acc, uint8_t op1,
   return vwmaccu_vx_u16mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -389,7 +354,6 @@ vuint16m1_t test_vwmaccu_vv_u16m1(vuint16m1_t acc, vuint8mf2_t op1,
   return vwmaccu_vv_u16m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -400,7 +364,6 @@ vuint16m1_t test_vwmaccu_vx_u16m1(vuint16m1_t acc, uint8_t op1, vuint8mf2_t op2,
   return vwmaccu_vx_u16m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -411,7 +374,6 @@ vuint16m2_t test_vwmaccu_vv_u16m2(vuint16m2_t acc, vuint8m1_t op1,
   return vwmaccu_vv_u16m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -422,7 +384,6 @@ vuint16m2_t test_vwmaccu_vx_u16m2(vuint16m2_t acc, uint8_t op1, vuint8m1_t op2,
   return vwmaccu_vx_u16m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -433,7 +394,6 @@ vuint16m4_t test_vwmaccu_vv_u16m4(vuint16m4_t acc, vuint8m2_t op1,
   return vwmaccu_vv_u16m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -444,7 +404,6 @@ vuint16m4_t test_vwmaccu_vx_u16m4(vuint16m4_t acc, uint8_t op1, vuint8m2_t op2,
   return vwmaccu_vx_u16m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -455,7 +414,6 @@ vuint16m8_t test_vwmaccu_vv_u16m8(vuint16m8_t acc, vuint8m4_t op1,
   return vwmaccu_vv_u16m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -466,7 +424,6 @@ vuint16m8_t test_vwmaccu_vx_u16m8(vuint16m8_t acc, uint8_t op1, vuint8m4_t op2,
   return vwmaccu_vx_u16m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -477,7 +434,6 @@ vuint32mf2_t test_vwmaccu_vv_u32mf2(vuint32mf2_t acc, vuint16mf4_t op1,
   return vwmaccu_vv_u32mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -488,7 +444,6 @@ vuint32mf2_t test_vwmaccu_vx_u32mf2(vuint32mf2_t acc, uint16_t op1,
   return vwmaccu_vx_u32mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -499,7 +454,6 @@ vuint32m1_t test_vwmaccu_vv_u32m1(vuint32m1_t acc, vuint16mf2_t op1,
   return vwmaccu_vv_u32m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -510,7 +464,6 @@ vuint32m1_t test_vwmaccu_vx_u32m1(vuint32m1_t acc, uint16_t op1,
   return vwmaccu_vx_u32m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -521,7 +474,6 @@ vuint32m2_t test_vwmaccu_vv_u32m2(vuint32m2_t acc, vuint16m1_t op1,
   return vwmaccu_vv_u32m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -532,7 +484,6 @@ vuint32m2_t test_vwmaccu_vx_u32m2(vuint32m2_t acc, uint16_t op1,
   return vwmaccu_vx_u32m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -543,7 +494,6 @@ vuint32m4_t test_vwmaccu_vv_u32m4(vuint32m4_t acc, vuint16m2_t op1,
   return vwmaccu_vv_u32m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -554,7 +504,6 @@ vuint32m4_t test_vwmaccu_vx_u32m4(vuint32m4_t acc, uint16_t op1,
   return vwmaccu_vx_u32m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -565,7 +514,6 @@ vuint32m8_t test_vwmaccu_vv_u32m8(vuint32m8_t acc, vuint16m4_t op1,
   return vwmaccu_vv_u32m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -576,7 +524,6 @@ vuint32m8_t test_vwmaccu_vx_u32m8(vuint32m8_t acc, uint16_t op1,
   return vwmaccu_vx_u32m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -587,7 +534,6 @@ vuint64m1_t test_vwmaccu_vv_u64m1(vuint64m1_t acc, vuint32mf2_t op1,
   return vwmaccu_vv_u64m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -598,7 +544,6 @@ vuint64m1_t test_vwmaccu_vx_u64m1(vuint64m1_t acc, uint32_t op1,
   return vwmaccu_vx_u64m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -609,7 +554,6 @@ vuint64m2_t test_vwmaccu_vv_u64m2(vuint64m2_t acc, vuint32m1_t op1,
   return vwmaccu_vv_u64m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -620,7 +564,6 @@ vuint64m2_t test_vwmaccu_vx_u64m2(vuint64m2_t acc, uint32_t op1,
   return vwmaccu_vx_u64m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -631,7 +574,6 @@ vuint64m4_t test_vwmaccu_vv_u64m4(vuint64m4_t acc, vuint32m2_t op1,
   return vwmaccu_vv_u64m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -642,7 +584,6 @@ vuint64m4_t test_vwmaccu_vx_u64m4(vuint64m4_t acc, uint32_t op1,
   return vwmaccu_vx_u64m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -653,7 +594,6 @@ vuint64m8_t test_vwmaccu_vv_u64m8(vuint64m8_t acc, vuint32m4_t op1,
   return vwmaccu_vv_u64m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -664,7 +604,6 @@ vuint64m8_t test_vwmaccu_vx_u64m8(vuint64m8_t acc, uint32_t op1,
   return vwmaccu_vx_u64m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -675,7 +614,6 @@ vint16mf4_t test_vwmaccsu_vv_i16mf4(vint16mf4_t acc, vint8mf8_t op1,
   return vwmaccsu_vv_i16mf4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -686,7 +624,6 @@ vint16mf4_t test_vwmaccsu_vx_i16mf4(vint16mf4_t acc, int8_t op1,
   return vwmaccsu_vx_i16mf4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -697,7 +634,6 @@ vint16mf2_t test_vwmaccsu_vv_i16mf2(vint16mf2_t acc, vint8mf4_t op1,
   return vwmaccsu_vv_i16mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -708,7 +644,6 @@ vint16mf2_t test_vwmaccsu_vx_i16mf2(vint16mf2_t acc, int8_t op1,
   return vwmaccsu_vx_i16mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -719,7 +654,6 @@ vint16m1_t test_vwmaccsu_vv_i16m1(vint16m1_t acc, vint8mf2_t op1,
   return vwmaccsu_vv_i16m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -730,7 +664,6 @@ vint16m1_t test_vwmaccsu_vx_i16m1(vint16m1_t acc, int8_t op1, vuint8mf2_t op2,
   return vwmaccsu_vx_i16m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -741,7 +674,6 @@ vint16m2_t test_vwmaccsu_vv_i16m2(vint16m2_t acc, vint8m1_t op1, vuint8m1_t op2,
   return vwmaccsu_vv_i16m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -752,7 +684,6 @@ vint16m2_t test_vwmaccsu_vx_i16m2(vint16m2_t acc, int8_t op1, vuint8m1_t op2,
   return vwmaccsu_vx_i16m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -763,7 +694,6 @@ vint16m4_t test_vwmaccsu_vv_i16m4(vint16m4_t acc, vint8m2_t op1, vuint8m2_t op2,
   return vwmaccsu_vv_i16m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -774,7 +704,6 @@ vint16m4_t test_vwmaccsu_vx_i16m4(vint16m4_t acc, int8_t op1, vuint8m2_t op2,
   return vwmaccsu_vx_i16m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -785,7 +714,6 @@ vint16m8_t test_vwmaccsu_vv_i16m8(vint16m8_t acc, vint8m4_t op1, vuint8m4_t op2,
   return vwmaccsu_vv_i16m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -796,7 +724,6 @@ vint16m8_t test_vwmaccsu_vx_i16m8(vint16m8_t acc, int8_t op1, vuint8m4_t op2,
   return vwmaccsu_vx_i16m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -807,7 +734,6 @@ vint32mf2_t test_vwmaccsu_vv_i32mf2(vint32mf2_t acc, vint16mf4_t op1,
   return vwmaccsu_vv_i32mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -818,7 +744,6 @@ vint32mf2_t test_vwmaccsu_vx_i32mf2(vint32mf2_t acc, int16_t op1,
   return vwmaccsu_vx_i32mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -829,7 +754,6 @@ vint32m1_t test_vwmaccsu_vv_i32m1(vint32m1_t acc, vint16mf2_t op1,
   return vwmaccsu_vv_i32m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -840,7 +764,6 @@ vint32m1_t test_vwmaccsu_vx_i32m1(vint32m1_t acc, int16_t op1, vuint16mf2_t op2,
   return vwmaccsu_vx_i32m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -851,7 +774,6 @@ vint32m2_t test_vwmaccsu_vv_i32m2(vint32m2_t acc, vint16m1_t op1,
   return vwmaccsu_vv_i32m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -862,7 +784,6 @@ vint32m2_t test_vwmaccsu_vx_i32m2(vint32m2_t acc, int16_t op1, vuint16m1_t op2,
   return vwmaccsu_vx_i32m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -873,7 +794,6 @@ vint32m4_t test_vwmaccsu_vv_i32m4(vint32m4_t acc, vint16m2_t op1,
   return vwmaccsu_vv_i32m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -884,7 +804,6 @@ vint32m4_t test_vwmaccsu_vx_i32m4(vint32m4_t acc, int16_t op1, vuint16m2_t op2,
   return vwmaccsu_vx_i32m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -895,7 +814,6 @@ vint32m8_t test_vwmaccsu_vv_i32m8(vint32m8_t acc, vint16m4_t op1,
   return vwmaccsu_vv_i32m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -906,7 +824,6 @@ vint32m8_t test_vwmaccsu_vx_i32m8(vint32m8_t acc, int16_t op1, vuint16m4_t op2,
   return vwmaccsu_vx_i32m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -917,7 +834,6 @@ vint64m1_t test_vwmaccsu_vv_i64m1(vint64m1_t acc, vint32mf2_t op1,
   return vwmaccsu_vv_i64m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -928,7 +844,6 @@ vint64m1_t test_vwmaccsu_vx_i64m1(vint64m1_t acc, int32_t op1, vuint32mf2_t op2,
   return vwmaccsu_vx_i64m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -939,7 +854,6 @@ vint64m2_t test_vwmaccsu_vv_i64m2(vint64m2_t acc, vint32m1_t op1,
   return vwmaccsu_vv_i64m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -950,7 +864,6 @@ vint64m2_t test_vwmaccsu_vx_i64m2(vint64m2_t acc, int32_t op1, vuint32m1_t op2,
   return vwmaccsu_vx_i64m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -961,7 +874,6 @@ vint64m4_t test_vwmaccsu_vv_i64m4(vint64m4_t acc, vint32m2_t op1,
   return vwmaccsu_vv_i64m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -972,7 +884,6 @@ vint64m4_t test_vwmaccsu_vx_i64m4(vint64m4_t acc, int32_t op1, vuint32m2_t op2,
   return vwmaccsu_vx_i64m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -983,7 +894,6 @@ vint64m8_t test_vwmaccsu_vv_i64m8(vint64m8_t acc, vint32m4_t op1,
   return vwmaccsu_vv_i64m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -994,7 +904,6 @@ vint64m8_t test_vwmaccsu_vx_i64m8(vint64m8_t acc, int32_t op1, vuint32m4_t op2,
   return vwmaccsu_vx_i64m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccus.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1005,7 +914,6 @@ vint16mf4_t test_vwmaccus_vx_i16mf4(vint16mf4_t acc, uint8_t op1,
   return vwmaccus_vx_i16mf4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccus.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1016,7 +924,6 @@ vint16mf2_t test_vwmaccus_vx_i16mf2(vint16mf2_t acc, uint8_t op1,
   return vwmaccus_vx_i16mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccus.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1027,7 +934,6 @@ vint16m1_t test_vwmaccus_vx_i16m1(vint16m1_t acc, uint8_t op1, vint8mf2_t op2,
   return vwmaccus_vx_i16m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccus.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1038,7 +944,6 @@ vint16m2_t test_vwmaccus_vx_i16m2(vint16m2_t acc, uint8_t op1, vint8m1_t op2,
   return vwmaccus_vx_i16m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccus.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1049,7 +954,6 @@ vint16m4_t test_vwmaccus_vx_i16m4(vint16m4_t acc, uint8_t op1, vint8m2_t op2,
   return vwmaccus_vx_i16m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccus.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1060,7 +964,6 @@ vint16m8_t test_vwmaccus_vx_i16m8(vint16m8_t acc, uint8_t op1, vint8m4_t op2,
   return vwmaccus_vx_i16m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccus.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1071,7 +974,6 @@ vint32mf2_t test_vwmaccus_vx_i32mf2(vint32mf2_t acc, uint16_t op1,
   return vwmaccus_vx_i32mf2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccus.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1082,7 +984,6 @@ vint32m1_t test_vwmaccus_vx_i32m1(vint32m1_t acc, uint16_t op1, vint16mf2_t op2,
   return vwmaccus_vx_i32m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccus.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1093,7 +994,6 @@ vint32m2_t test_vwmaccus_vx_i32m2(vint32m2_t acc, uint16_t op1, vint16m1_t op2,
   return vwmaccus_vx_i32m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccus.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1104,7 +1004,6 @@ vint32m4_t test_vwmaccus_vx_i32m4(vint32m4_t acc, uint16_t op1, vint16m2_t op2,
   return vwmaccus_vx_i32m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccus.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1115,7 +1014,6 @@ vint32m8_t test_vwmaccus_vx_i32m8(vint32m8_t acc, uint16_t op1, vint16m4_t op2,
   return vwmaccus_vx_i32m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccus.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1126,7 +1024,6 @@ vint64m1_t test_vwmaccus_vx_i64m1(vint64m1_t acc, uint32_t op1, vint32mf2_t op2,
   return vwmaccus_vx_i64m1(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccus.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1137,7 +1034,6 @@ vint64m2_t test_vwmaccus_vx_i64m2(vint64m2_t acc, uint32_t op1, vint32m1_t op2,
   return vwmaccus_vx_i64m2(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccus.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1148,7 +1044,6 @@ vint64m4_t test_vwmaccus_vx_i64m4(vint64m4_t acc, uint32_t op1, vint32m2_t op2,
   return vwmaccus_vx_i64m4(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccus.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1159,7 +1054,6 @@ vint64m8_t test_vwmaccus_vx_i64m8(vint64m8_t acc, uint32_t op1, vint32m4_t op2,
   return vwmaccus_vx_i64m8(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1170,7 +1064,6 @@ vint16mf4_t test_vwmacc_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc,
   return vwmacc_vv_i16mf4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1181,7 +1074,6 @@ vint16mf4_t test_vwmacc_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int8_t op1,
   return vwmacc_vx_i16mf4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmacc.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1192,7 +1084,6 @@ vint16mf2_t test_vwmacc_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc,
   return vwmacc_vv_i16mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmacc.mask.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1203,7 +1094,6 @@ vint16mf2_t test_vwmacc_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int8_t op1,
   return vwmacc_vx_i16mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmacc.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1214,7 +1104,6 @@ vint16m1_t test_vwmacc_vv_i16m1_m(vbool16_t mask, vint16m1_t acc,
   return vwmacc_vv_i16m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmacc.mask.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1225,7 +1114,6 @@ vint16m1_t test_vwmacc_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int8_t op1,
   return vwmacc_vx_i16m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmacc.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1236,7 +1124,6 @@ vint16m2_t test_vwmacc_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint8m1_t op1,
   return vwmacc_vv_i16m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmacc.mask.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1247,7 +1134,6 @@ vint16m2_t test_vwmacc_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int8_t op1,
   return vwmacc_vx_i16m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmacc.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1258,7 +1144,6 @@ vint16m4_t test_vwmacc_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint8m2_t op1,
   return vwmacc_vv_i16m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmacc.mask.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1269,7 +1154,6 @@ vint16m4_t test_vwmacc_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int8_t op1,
   return vwmacc_vx_i16m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmacc.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1280,7 +1164,6 @@ vint16m8_t test_vwmacc_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint8m4_t op1,
   return vwmacc_vv_i16m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmacc.mask.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1291,7 +1174,6 @@ vint16m8_t test_vwmacc_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int8_t op1,
   return vwmacc_vx_i16m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmacc.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1303,7 +1185,6 @@ vint32mf2_t test_vwmacc_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc,
   return vwmacc_vv_i32mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmacc.mask.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1314,7 +1195,6 @@ vint32mf2_t test_vwmacc_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc,
   return vwmacc_vx_i32mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmacc.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1325,7 +1205,6 @@ vint32m1_t test_vwmacc_vv_i32m1_m(vbool32_t mask, vint32m1_t acc,
   return vwmacc_vv_i32m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmacc.mask.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1336,7 +1215,6 @@ vint32m1_t test_vwmacc_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int16_t op1,
   return vwmacc_vx_i32m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmacc.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1347,7 +1225,6 @@ vint32m2_t test_vwmacc_vv_i32m2_m(vbool16_t mask, vint32m2_t acc,
   return vwmacc_vv_i32m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmacc.mask.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1358,7 +1235,6 @@ vint32m2_t test_vwmacc_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int16_t op1,
   return vwmacc_vx_i32m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmacc.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1369,7 +1245,6 @@ vint32m4_t test_vwmacc_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint16m2_t op1,
   return vwmacc_vv_i32m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmacc.mask.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1380,7 +1255,6 @@ vint32m4_t test_vwmacc_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int16_t op1,
   return vwmacc_vx_i32m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmacc.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1391,7 +1265,6 @@ vint32m8_t test_vwmacc_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint16m4_t op1,
   return vwmacc_vv_i32m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmacc.mask.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1402,7 +1275,6 @@ vint32m8_t test_vwmacc_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int16_t op1,
   return vwmacc_vx_i32m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1413,7 +1285,6 @@ vint64m1_t test_vwmacc_vv_i64m1_m(vbool64_t mask, vint64m1_t acc,
   return vwmacc_vv_i64m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1424,7 +1295,6 @@ vint64m1_t test_vwmacc_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int32_t op1,
   return vwmacc_vx_i64m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmacc.mask.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1435,7 +1305,6 @@ vint64m2_t test_vwmacc_vv_i64m2_m(vbool32_t mask, vint64m2_t acc,
   return vwmacc_vv_i64m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmacc.mask.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1446,7 +1315,6 @@ vint64m2_t test_vwmacc_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int32_t op1,
   return vwmacc_vx_i64m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmacc.mask.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1457,7 +1325,6 @@ vint64m4_t test_vwmacc_vv_i64m4_m(vbool16_t mask, vint64m4_t acc,
   return vwmacc_vv_i64m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmacc.mask.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1468,7 +1335,6 @@ vint64m4_t test_vwmacc_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int32_t op1,
   return vwmacc_vx_i64m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmacc.mask.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1479,7 +1345,6 @@ vint64m8_t test_vwmacc_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint32m4_t op1,
   return vwmacc_vv_i64m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmacc.mask.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1490,7 +1355,6 @@ vint64m8_t test_vwmacc_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int32_t op1,
   return vwmacc_vx_i64m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1502,7 +1366,6 @@ vuint16mf4_t test_vwmaccu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc,
   return vwmaccu_vv_u16mf4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1513,7 +1376,6 @@ vuint16mf4_t test_vwmaccu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc,
   return vwmaccu_vx_u16mf4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1525,7 +1387,6 @@ vuint16mf2_t test_vwmaccu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc,
   return vwmaccu_vv_u16mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1536,7 +1397,6 @@ vuint16mf2_t test_vwmaccu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc,
   return vwmaccu_vx_u16mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1548,7 +1408,6 @@ vuint16m1_t test_vwmaccu_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc,
   return vwmaccu_vv_u16m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1559,7 +1418,6 @@ vuint16m1_t test_vwmaccu_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc,
   return vwmaccu_vx_u16m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1570,7 +1428,6 @@ vuint16m2_t test_vwmaccu_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc,
   return vwmaccu_vv_u16m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1581,7 +1438,6 @@ vuint16m2_t test_vwmaccu_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint8_t op1,
   return vwmaccu_vx_u16m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1592,7 +1448,6 @@ vuint16m4_t test_vwmaccu_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc,
   return vwmaccu_vv_u16m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1603,7 +1458,6 @@ vuint16m4_t test_vwmaccu_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint8_t op1,
   return vwmaccu_vx_u16m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1614,7 +1468,6 @@ vuint16m8_t test_vwmaccu_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc,
   return vwmaccu_vv_u16m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1625,7 +1478,6 @@ vuint16m8_t test_vwmaccu_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint8_t op1,
   return vwmaccu_vx_u16m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1637,7 +1489,6 @@ vuint32mf2_t test_vwmaccu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc,
   return vwmaccu_vv_u32mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1649,7 +1500,6 @@ vuint32mf2_t test_vwmaccu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc,
   return vwmaccu_vx_u32mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1661,7 +1511,6 @@ vuint32m1_t test_vwmaccu_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc,
   return vwmaccu_vv_u32m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1672,7 +1521,6 @@ vuint32m1_t test_vwmaccu_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc,
   return vwmaccu_vx_u32m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1684,7 +1532,6 @@ vuint32m2_t test_vwmaccu_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc,
   return vwmaccu_vv_u32m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1695,7 +1542,6 @@ vuint32m2_t test_vwmaccu_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc,
   return vwmaccu_vx_u32m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1707,7 +1553,6 @@ vuint32m4_t test_vwmaccu_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc,
   return vwmaccu_vv_u32m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1718,7 +1563,6 @@ vuint32m4_t test_vwmaccu_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc,
   return vwmaccu_vx_u32m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1730,7 +1574,6 @@ vuint32m8_t test_vwmaccu_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc,
   return vwmaccu_vv_u32m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1741,7 +1584,6 @@ vuint32m8_t test_vwmaccu_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc,
   return vwmaccu_vx_u32m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1753,7 +1595,6 @@ vuint64m1_t test_vwmaccu_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc,
   return vwmaccu_vv_u64m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1764,7 +1605,6 @@ vuint64m1_t test_vwmaccu_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc,
   return vwmaccu_vx_u64m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1776,7 +1616,6 @@ vuint64m2_t test_vwmaccu_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc,
   return vwmaccu_vv_u64m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1787,7 +1626,6 @@ vuint64m2_t test_vwmaccu_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc,
   return vwmaccu_vx_u64m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1799,7 +1637,6 @@ vuint64m4_t test_vwmaccu_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc,
   return vwmaccu_vv_u64m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1810,7 +1647,6 @@ vuint64m4_t test_vwmaccu_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc,
   return vwmaccu_vx_u64m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1822,7 +1658,6 @@ vuint64m8_t test_vwmaccu_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc,
   return vwmaccu_vv_u64m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1833,7 +1668,6 @@ vuint64m8_t test_vwmaccu_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc,
   return vwmaccu_vx_u64m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1845,7 +1679,6 @@ vint16mf4_t test_vwmaccsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc,
   return vwmaccsu_vv_i16mf4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1856,7 +1689,6 @@ vint16mf4_t test_vwmaccsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc,
   return vwmaccsu_vx_i16mf4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1868,7 +1700,6 @@ vint16mf2_t test_vwmaccsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc,
   return vwmaccsu_vv_i16mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1879,7 +1710,6 @@ vint16mf2_t test_vwmaccsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc,
   return vwmaccsu_vx_i16mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1891,7 +1721,6 @@ vint16m1_t test_vwmaccsu_vv_i16m1_m(vbool16_t mask, vint16m1_t acc,
   return vwmaccsu_vv_i16m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1902,7 +1731,6 @@ vint16m1_t test_vwmaccsu_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int8_t op1,
   return vwmaccsu_vx_i16m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1913,7 +1741,6 @@ vint16m2_t test_vwmaccsu_vv_i16m2_m(vbool8_t mask, vint16m2_t acc,
   return vwmaccsu_vv_i16m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1924,7 +1751,6 @@ vint16m2_t test_vwmaccsu_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int8_t op1,
   return vwmaccsu_vx_i16m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1935,7 +1761,6 @@ vint16m4_t test_vwmaccsu_vv_i16m4_m(vbool4_t mask, vint16m4_t acc,
   return vwmaccsu_vv_i16m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1946,7 +1771,6 @@ vint16m4_t test_vwmaccsu_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int8_t op1,
   return vwmaccsu_vx_i16m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1957,7 +1781,6 @@ vint16m8_t test_vwmaccsu_vv_i16m8_m(vbool2_t mask, vint16m8_t acc,
   return vwmaccsu_vv_i16m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1968,7 +1791,6 @@ vint16m8_t test_vwmaccsu_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int8_t op1,
   return vwmaccsu_vx_i16m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1980,7 +1802,6 @@ vint32mf2_t test_vwmaccsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc,
   return vwmaccsu_vv_i32mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1992,7 +1813,6 @@ vint32mf2_t test_vwmaccsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc,
   return vwmaccsu_vx_i32mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2004,7 +1824,6 @@ vint32m1_t test_vwmaccsu_vv_i32m1_m(vbool32_t mask, vint32m1_t acc,
   return vwmaccsu_vv_i32m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2015,7 +1834,6 @@ vint32m1_t test_vwmaccsu_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int16_t op1,
   return vwmaccsu_vx_i32m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2027,7 +1845,6 @@ vint32m2_t test_vwmaccsu_vv_i32m2_m(vbool16_t mask, vint32m2_t acc,
   return vwmaccsu_vv_i32m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2038,7 +1855,6 @@ vint32m2_t test_vwmaccsu_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int16_t op1,
   return vwmaccsu_vx_i32m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2050,7 +1866,6 @@ vint32m4_t test_vwmaccsu_vv_i32m4_m(vbool8_t mask, vint32m4_t acc,
   return vwmaccsu_vv_i32m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2061,7 +1876,6 @@ vint32m4_t test_vwmaccsu_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int16_t op1,
   return vwmaccsu_vx_i32m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2073,7 +1887,6 @@ vint32m8_t test_vwmaccsu_vv_i32m8_m(vbool4_t mask, vint32m8_t acc,
   return vwmaccsu_vv_i32m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2084,7 +1897,6 @@ vint32m8_t test_vwmaccsu_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int16_t op1,
   return vwmaccsu_vx_i32m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2096,7 +1908,6 @@ vint64m1_t test_vwmaccsu_vv_i64m1_m(vbool64_t mask, vint64m1_t acc,
   return vwmaccsu_vv_i64m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.mask.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2107,7 +1918,6 @@ vint64m1_t test_vwmaccsu_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int32_t op1,
   return vwmaccsu_vx_i64m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.mask.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2119,7 +1929,6 @@ vint64m2_t test_vwmaccsu_vv_i64m2_m(vbool32_t mask, vint64m2_t acc,
   return vwmaccsu_vv_i64m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.mask.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2130,7 +1939,6 @@ vint64m2_t test_vwmaccsu_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int32_t op1,
   return vwmaccsu_vx_i64m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.mask.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2142,7 +1950,6 @@ vint64m4_t test_vwmaccsu_vv_i64m4_m(vbool16_t mask, vint64m4_t acc,
   return vwmaccsu_vv_i64m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.mask.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2153,7 +1960,6 @@ vint64m4_t test_vwmaccsu_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int32_t op1,
   return vwmaccsu_vx_i64m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.mask.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2165,7 +1971,6 @@ vint64m8_t test_vwmaccsu_vv_i64m8_m(vbool8_t mask, vint64m8_t acc,
   return vwmaccsu_vv_i64m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.mask.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2176,7 +1981,6 @@ vint64m8_t test_vwmaccsu_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int32_t op1,
   return vwmaccsu_vx_i64m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccus.mask.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2187,7 +1991,6 @@ vint16mf4_t test_vwmaccus_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc,
   return vwmaccus_vx_i16mf4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccus.mask.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2198,7 +2001,6 @@ vint16mf2_t test_vwmaccus_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc,
   return vwmaccus_vx_i16mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccus.mask.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2209,7 +2011,6 @@ vint16m1_t test_vwmaccus_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, uint8_t op1,
   return vwmaccus_vx_i16m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccus.mask.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2220,7 +2021,6 @@ vint16m2_t test_vwmaccus_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, uint8_t op1,
   return vwmaccus_vx_i16m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccus.mask.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2231,7 +2031,6 @@ vint16m4_t test_vwmaccus_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, uint8_t op1,
   return vwmaccus_vx_i16m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccus.mask.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2242,7 +2041,6 @@ vint16m8_t test_vwmaccus_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, uint8_t op1,
   return vwmaccus_vx_i16m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccus.mask.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2254,7 +2052,6 @@ vint32mf2_t test_vwmaccus_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc,
   return vwmaccus_vx_i32mf2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccus.mask.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2265,7 +2062,6 @@ vint32m1_t test_vwmaccus_vx_i32m1_m(vbool32_t mask, vint32m1_t acc,
   return vwmaccus_vx_i32m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccus.mask.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2276,7 +2072,6 @@ vint32m2_t test_vwmaccus_vx_i32m2_m(vbool16_t mask, vint32m2_t acc,
   return vwmaccus_vx_i32m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccus.mask.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2287,7 +2082,6 @@ vint32m4_t test_vwmaccus_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, uint16_t op1,
   return vwmaccus_vx_i32m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccus.mask.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2298,7 +2092,6 @@ vint32m8_t test_vwmaccus_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, uint16_t op1,
   return vwmaccus_vx_i32m8_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccus.mask.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2309,7 +2102,6 @@ vint64m1_t test_vwmaccus_vx_i64m1_m(vbool64_t mask, vint64m1_t acc,
   return vwmaccus_vx_i64m1_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccus.mask.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2320,7 +2112,6 @@ vint64m2_t test_vwmaccus_vx_i64m2_m(vbool32_t mask, vint64m2_t acc,
   return vwmaccus_vx_i64m2_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccus.mask.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2331,7 +2122,6 @@ vint64m4_t test_vwmaccus_vx_i64m4_m(vbool16_t mask, vint64m4_t acc,
   return vwmaccus_vx_i64m4_m(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccus.mask.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwmul.c
index 92163d1ee47f8..181125f344407 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwmul.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint16mf4_t test_vwmul_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
   return vwmul_vv_i16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint16mf4_t test_vwmul_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) {
   return vwmul_vx_i16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmul.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint16mf2_t test_vwmul_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
   return vwmul_vv_i16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmul.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint16mf2_t test_vwmul_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) {
   return vwmul_vx_i16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint16m1_t test_vwmul_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
   return vwmul_vv_i16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint16m1_t test_vwmul_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) {
   return vwmul_vx_i16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmul.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vint16m2_t test_vwmul_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) {
   return vwmul_vv_i16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmul.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vint16m2_t test_vwmul_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) {
   return vwmul_vx_i16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmul.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vint16m4_t test_vwmul_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
   return vwmul_vv_i16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmul.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vint16m4_t test_vwmul_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) {
   return vwmul_vx_i16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmul.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vint16m8_t test_vwmul_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) {
   return vwmul_vv_i16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmul.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vint16m8_t test_vwmul_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) {
   return vwmul_vx_i16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmul.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vint32mf2_t test_vwmul_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
   return vwmul_vv_i32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmul.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vint32mf2_t test_vwmul_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) {
   return vwmul_vx_i32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmul.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vint32m1_t test_vwmul_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
   return vwmul_vv_i32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmul.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vint32m1_t test_vwmul_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) {
   return vwmul_vx_i32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vint32m2_t test_vwmul_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) {
   return vwmul_vv_i32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vint32m2_t test_vwmul_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) {
   return vwmul_vx_i32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmul.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vint32m4_t test_vwmul_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) {
   return vwmul_vv_i32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmul.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vint32m4_t test_vwmul_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) {
   return vwmul_vx_i32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmul.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vint32m8_t test_vwmul_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) {
   return vwmul_vv_i32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmul.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vint32m8_t test_vwmul_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) {
   return vwmul_vx_i32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vint64m1_t test_vwmul_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
   return vwmul_vv_i64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vint64m1_t test_vwmul_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) {
   return vwmul_vx_i64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vint64m2_t test_vwmul_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) {
   return vwmul_vv_i64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vint64m2_t test_vwmul_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) {
   return vwmul_vx_i64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vint64m4_t test_vwmul_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) {
   return vwmul_vv_i64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vint64m4_t test_vwmul_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) {
   return vwmul_vx_i64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vint64m8_t test_vwmul_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
   return vwmul_vv_i64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -304,7 +274,6 @@ vint64m8_t test_vwmul_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) {
   return vwmul_vx_i64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -314,7 +283,6 @@ vuint16mf4_t test_vwmulu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl)
   return vwmulu_vv_u16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -324,7 +292,6 @@ vuint16mf4_t test_vwmulu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) {
   return vwmulu_vx_u16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -334,7 +301,6 @@ vuint16mf2_t test_vwmulu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl)
   return vwmulu_vv_u16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -344,7 +310,6 @@ vuint16mf2_t test_vwmulu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) {
   return vwmulu_vx_u16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -354,7 +319,6 @@ vuint16m1_t test_vwmulu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
   return vwmulu_vv_u16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -364,7 +328,6 @@ vuint16m1_t test_vwmulu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) {
   return vwmulu_vx_u16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -374,7 +337,6 @@ vuint16m2_t test_vwmulu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vwmulu_vv_u16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -384,7 +346,6 @@ vuint16m2_t test_vwmulu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) {
   return vwmulu_vx_u16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -394,7 +355,6 @@ vuint16m4_t test_vwmulu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vwmulu_vv_u16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -404,7 +364,6 @@ vuint16m4_t test_vwmulu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) {
   return vwmulu_vx_u16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -414,7 +373,6 @@ vuint16m8_t test_vwmulu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vwmulu_vv_u16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -424,7 +382,6 @@ vuint16m8_t test_vwmulu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) {
   return vwmulu_vx_u16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -434,7 +391,6 @@ vuint32mf2_t test_vwmulu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl
   return vwmulu_vv_u32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -444,7 +400,6 @@ vuint32mf2_t test_vwmulu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) {
   return vwmulu_vx_u32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -454,7 +409,6 @@ vuint32m1_t test_vwmulu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
   return vwmulu_vv_u32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -464,7 +418,6 @@ vuint32m1_t test_vwmulu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) {
   return vwmulu_vx_u32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -474,7 +427,6 @@ vuint32m2_t test_vwmulu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
   return vwmulu_vv_u32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -484,7 +436,6 @@ vuint32m2_t test_vwmulu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) {
   return vwmulu_vx_u32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -494,7 +445,6 @@ vuint32m4_t test_vwmulu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vwmulu_vv_u32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -504,7 +454,6 @@ vuint32m4_t test_vwmulu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) {
   return vwmulu_vx_u32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -514,7 +463,6 @@ vuint32m8_t test_vwmulu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vwmulu_vv_u32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -524,7 +472,6 @@ vuint32m8_t test_vwmulu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) {
   return vwmulu_vx_u32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -534,7 +481,6 @@ vuint64m1_t test_vwmulu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
   return vwmulu_vv_u64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -544,7 +490,6 @@ vuint64m1_t test_vwmulu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) {
   return vwmulu_vx_u64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -554,7 +499,6 @@ vuint64m2_t test_vwmulu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
   return vwmulu_vv_u64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -564,7 +508,6 @@ vuint64m2_t test_vwmulu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) {
   return vwmulu_vx_u64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -574,7 +517,6 @@ vuint64m4_t test_vwmulu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
   return vwmulu_vv_u64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -584,7 +526,6 @@ vuint64m4_t test_vwmulu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vwmulu_vx_u64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -594,7 +535,6 @@ vuint64m8_t test_vwmulu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vwmulu_vv_u64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -604,7 +544,6 @@ vuint64m8_t test_vwmulu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vwmulu_vx_u64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -614,7 +553,6 @@ vint16mf4_t test_vwmulsu_vv_i16mf4(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
   return vwmulsu_vv_i16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -624,7 +562,6 @@ vint16mf4_t test_vwmulsu_vx_i16mf4(vint8mf8_t op1, uint8_t op2, size_t vl) {
   return vwmulsu_vx_i16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -634,7 +571,6 @@ vint16mf2_t test_vwmulsu_vv_i16mf2(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
   return vwmulsu_vv_i16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -644,7 +580,6 @@ vint16mf2_t test_vwmulsu_vx_i16mf2(vint8mf4_t op1, uint8_t op2, size_t vl) {
   return vwmulsu_vx_i16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -654,7 +589,6 @@ vint16m1_t test_vwmulsu_vv_i16m1(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
   return vwmulsu_vv_i16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -664,7 +598,6 @@ vint16m1_t test_vwmulsu_vx_i16m1(vint8mf2_t op1, uint8_t op2, size_t vl) {
   return vwmulsu_vx_i16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -674,7 +607,6 @@ vint16m2_t test_vwmulsu_vv_i16m2(vint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vwmulsu_vv_i16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -684,7 +616,6 @@ vint16m2_t test_vwmulsu_vx_i16m2(vint8m1_t op1, uint8_t op2, size_t vl) {
   return vwmulsu_vx_i16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -694,7 +625,6 @@ vint16m4_t test_vwmulsu_vv_i16m4(vint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vwmulsu_vv_i16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -704,7 +634,6 @@ vint16m4_t test_vwmulsu_vx_i16m4(vint8m2_t op1, uint8_t op2, size_t vl) {
   return vwmulsu_vx_i16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -714,7 +643,6 @@ vint16m8_t test_vwmulsu_vv_i16m8(vint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vwmulsu_vv_i16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -724,7 +652,6 @@ vint16m8_t test_vwmulsu_vx_i16m8(vint8m4_t op1, uint8_t op2, size_t vl) {
   return vwmulsu_vx_i16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -734,7 +661,6 @@ vint32mf2_t test_vwmulsu_vv_i32mf2(vint16mf4_t op1, vuint16mf4_t op2, size_t vl)
   return vwmulsu_vv_i32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -744,7 +670,6 @@ vint32mf2_t test_vwmulsu_vx_i32mf2(vint16mf4_t op1, uint16_t op2, size_t vl) {
   return vwmulsu_vx_i32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -754,7 +679,6 @@ vint32m1_t test_vwmulsu_vv_i32m1(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
   return vwmulsu_vv_i32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -764,7 +688,6 @@ vint32m1_t test_vwmulsu_vx_i32m1(vint16mf2_t op1, uint16_t op2, size_t vl) {
   return vwmulsu_vx_i32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -774,7 +697,6 @@ vint32m2_t test_vwmulsu_vv_i32m2(vint16m1_t op1, vuint16m1_t op2, size_t vl) {
   return vwmulsu_vv_i32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -784,7 +706,6 @@ vint32m2_t test_vwmulsu_vx_i32m2(vint16m1_t op1, uint16_t op2, size_t vl) {
   return vwmulsu_vx_i32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -794,7 +715,6 @@ vint32m4_t test_vwmulsu_vv_i32m4(vint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vwmulsu_vv_i32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -804,7 +724,6 @@ vint32m4_t test_vwmulsu_vx_i32m4(vint16m2_t op1, uint16_t op2, size_t vl) {
   return vwmulsu_vx_i32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -814,7 +733,6 @@ vint32m8_t test_vwmulsu_vv_i32m8(vint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vwmulsu_vv_i32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -824,7 +742,6 @@ vint32m8_t test_vwmulsu_vx_i32m8(vint16m4_t op1, uint16_t op2, size_t vl) {
   return vwmulsu_vx_i32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -834,7 +751,6 @@ vint64m1_t test_vwmulsu_vv_i64m1(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
   return vwmulsu_vv_i64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -844,7 +760,6 @@ vint64m1_t test_vwmulsu_vx_i64m1(vint32mf2_t op1, uint32_t op2, size_t vl) {
   return vwmulsu_vx_i64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -854,7 +769,6 @@ vint64m2_t test_vwmulsu_vv_i64m2(vint32m1_t op1, vuint32m1_t op2, size_t vl) {
   return vwmulsu_vv_i64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -864,7 +778,6 @@ vint64m2_t test_vwmulsu_vx_i64m2(vint32m1_t op1, uint32_t op2, size_t vl) {
   return vwmulsu_vx_i64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -874,7 +787,6 @@ vint64m4_t test_vwmulsu_vv_i64m4(vint32m2_t op1, vuint32m2_t op2, size_t vl) {
   return vwmulsu_vv_i64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -884,7 +796,6 @@ vint64m4_t test_vwmulsu_vx_i64m4(vint32m2_t op1, uint32_t op2, size_t vl) {
   return vwmulsu_vx_i64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -894,7 +805,6 @@ vint64m8_t test_vwmulsu_vv_i64m8(vint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vwmulsu_vv_i64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -904,7 +814,6 @@ vint64m8_t test_vwmulsu_vx_i64m8(vint32m4_t op1, uint32_t op2, size_t vl) {
   return vwmulsu_vx_i64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -914,7 +823,6 @@ vint16mf4_t test_vwmul_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8m
   return vwmul_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -924,7 +832,6 @@ vint16mf4_t test_vwmul_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8m
   return vwmul_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -934,7 +841,6 @@ vint16mf2_t test_vwmul_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8m
   return vwmul_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -944,7 +850,6 @@ vint16mf2_t test_vwmul_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8m
   return vwmul_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -954,7 +859,6 @@ vint16m1_t test_vwmul_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_
   return vwmul_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -964,7 +868,6 @@ vint16m1_t test_vwmul_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_
   return vwmul_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -974,7 +877,6 @@ vint16m2_t test_vwmul_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t
   return vwmul_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -984,7 +886,6 @@ vint16m2_t test_vwmul_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t
   return vwmul_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -994,7 +895,6 @@ vint16m4_t test_vwmul_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t
   return vwmul_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1004,7 +904,6 @@ vint16m4_t test_vwmul_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t
   return vwmul_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1014,7 +913,6 @@ vint16m8_t test_vwmul_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t
   return vwmul_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1024,7 +922,6 @@ vint16m8_t test_vwmul_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t
   return vwmul_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1034,7 +931,6 @@ vint32mf2_t test_vwmul_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16
   return vwmul_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1044,7 +940,6 @@ vint32mf2_t test_vwmul_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16
   return vwmul_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1054,7 +949,6 @@ vint32m1_t test_vwmul_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2
   return vwmul_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1064,7 +958,6 @@ vint32m1_t test_vwmul_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2
   return vwmul_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1074,7 +967,6 @@ vint32m2_t test_vwmul_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_
   return vwmul_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1084,7 +976,6 @@ vint32m2_t test_vwmul_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_
   return vwmul_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1094,7 +985,6 @@ vint32m4_t test_vwmul_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t
   return vwmul_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1104,7 +994,6 @@ vint32m4_t test_vwmul_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t
   return vwmul_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1114,7 +1003,6 @@ vint32m8_t test_vwmul_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t
   return vwmul_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1124,7 +1012,6 @@ vint32m8_t test_vwmul_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t
   return vwmul_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1134,7 +1021,6 @@ vint64m1_t test_vwmul_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2
   return vwmul_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1144,7 +1030,6 @@ vint64m1_t test_vwmul_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2
   return vwmul_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1154,7 +1039,6 @@ vint64m2_t test_vwmul_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_
   return vwmul_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1164,7 +1048,6 @@ vint64m2_t test_vwmul_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_
   return vwmul_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1174,7 +1057,6 @@ vint64m4_t test_vwmul_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_
   return vwmul_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1184,7 +1066,6 @@ vint64m4_t test_vwmul_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_
   return vwmul_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1194,7 +1075,6 @@ vint64m8_t test_vwmul_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t
   return vwmul_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1204,7 +1084,6 @@ vint64m8_t test_vwmul_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t
   return vwmul_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1214,7 +1093,6 @@ vuint16mf4_t test_vwmulu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vui
   return vwmulu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1224,7 +1102,6 @@ vuint16mf4_t test_vwmulu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vui
   return vwmulu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1234,7 +1111,6 @@ vuint16mf2_t test_vwmulu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vui
   return vwmulu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1244,7 +1120,6 @@ vuint16mf2_t test_vwmulu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vui
   return vwmulu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1254,7 +1129,6 @@ vuint16m1_t test_vwmulu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint8
   return vwmulu_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1264,7 +1138,6 @@ vuint16m1_t test_vwmulu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint8
   return vwmulu_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1274,7 +1147,6 @@ vuint16m2_t test_vwmulu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint8m
   return vwmulu_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1284,7 +1156,6 @@ vuint16m2_t test_vwmulu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint8m
   return vwmulu_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1294,7 +1165,6 @@ vuint16m4_t test_vwmulu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint8m
   return vwmulu_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1304,7 +1174,6 @@ vuint16m4_t test_vwmulu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint8m
   return vwmulu_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1314,7 +1183,6 @@ vuint16m8_t test_vwmulu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint8m
   return vwmulu_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1324,7 +1192,6 @@ vuint16m8_t test_vwmulu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint8m
   return vwmulu_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1334,7 +1201,6 @@ vuint32mf2_t test_vwmulu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vui
   return vwmulu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1344,7 +1210,6 @@ vuint32mf2_t test_vwmulu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vui
   return vwmulu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1354,7 +1219,6 @@ vuint32m1_t test_vwmulu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint1
   return vwmulu_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1364,7 +1228,6 @@ vuint32m1_t test_vwmulu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint1
   return vwmulu_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1374,7 +1237,6 @@ vuint32m2_t test_vwmulu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint1
   return vwmulu_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1384,7 +1246,6 @@ vuint32m2_t test_vwmulu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint1
   return vwmulu_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1394,7 +1255,6 @@ vuint32m4_t test_vwmulu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint16
   return vwmulu_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1404,7 +1264,6 @@ vuint32m4_t test_vwmulu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint16
   return vwmulu_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1414,7 +1273,6 @@ vuint32m8_t test_vwmulu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint16
   return vwmulu_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1424,7 +1282,6 @@ vuint32m8_t test_vwmulu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint16
   return vwmulu_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1434,7 +1291,6 @@ vuint64m1_t test_vwmulu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint3
   return vwmulu_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1444,7 +1300,6 @@ vuint64m1_t test_vwmulu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint3
   return vwmulu_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1454,7 +1309,6 @@ vuint64m2_t test_vwmulu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint3
   return vwmulu_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1464,7 +1318,6 @@ vuint64m2_t test_vwmulu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint3
   return vwmulu_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1474,7 +1327,6 @@ vuint64m4_t test_vwmulu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint3
   return vwmulu_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1484,7 +1336,6 @@ vuint64m4_t test_vwmulu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint3
   return vwmulu_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1494,7 +1345,6 @@ vuint64m8_t test_vwmulu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint32
   return vwmulu_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1504,7 +1354,6 @@ vuint64m8_t test_vwmulu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint32
   return vwmulu_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1514,7 +1363,6 @@ vint16mf4_t test_vwmulsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint
   return vwmulsu_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1524,7 +1372,6 @@ vint16mf4_t test_vwmulsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint
   return vwmulsu_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1534,7 +1381,6 @@ vint16mf2_t test_vwmulsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint
   return vwmulsu_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1544,7 +1390,6 @@ vint16mf2_t test_vwmulsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint
   return vwmulsu_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1554,7 +1399,6 @@ vint16m1_t test_vwmulsu_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf
   return vwmulsu_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1564,7 +1408,6 @@ vint16m1_t test_vwmulsu_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf
   return vwmulsu_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1574,7 +1417,6 @@ vint16m2_t test_vwmulsu_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_
   return vwmulsu_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1584,7 +1426,6 @@ vint16m2_t test_vwmulsu_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_
   return vwmulsu_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1594,7 +1435,6 @@ vint16m4_t test_vwmulsu_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_
   return vwmulsu_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1604,7 +1444,6 @@ vint16m4_t test_vwmulsu_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_
   return vwmulsu_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1614,7 +1453,6 @@ vint16m8_t test_vwmulsu_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_
   return vwmulsu_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1624,7 +1462,6 @@ vint16m8_t test_vwmulsu_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_
   return vwmulsu_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1634,7 +1471,6 @@ vint32mf2_t test_vwmulsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint
   return vwmulsu_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1644,7 +1480,6 @@ vint32mf2_t test_vwmulsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint
   return vwmulsu_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1654,7 +1489,6 @@ vint32m1_t test_vwmulsu_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16m
   return vwmulsu_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1664,7 +1498,6 @@ vint32m1_t test_vwmulsu_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16m
   return vwmulsu_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1674,7 +1507,6 @@ vint32m2_t test_vwmulsu_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m
   return vwmulsu_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1684,7 +1516,6 @@ vint32m2_t test_vwmulsu_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m
   return vwmulsu_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1694,7 +1525,6 @@ vint32m4_t test_vwmulsu_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2
   return vwmulsu_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1704,7 +1534,6 @@ vint32m4_t test_vwmulsu_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2
   return vwmulsu_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1714,7 +1543,6 @@ vint32m8_t test_vwmulsu_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4
   return vwmulsu_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1724,7 +1552,6 @@ vint32m8_t test_vwmulsu_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4
   return vwmulsu_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1734,7 +1561,6 @@ vint64m1_t test_vwmulsu_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32m
   return vwmulsu_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1744,7 +1570,6 @@ vint64m1_t test_vwmulsu_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32m
   return vwmulsu_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1754,7 +1579,6 @@ vint64m2_t test_vwmulsu_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m
   return vwmulsu_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1764,7 +1588,6 @@ vint64m2_t test_vwmulsu_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m
   return vwmulsu_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1774,7 +1597,6 @@ vint64m4_t test_vwmulsu_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m
   return vwmulsu_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1784,7 +1606,6 @@ vint64m4_t test_vwmulsu_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m
   return vwmulsu_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1794,7 +1615,6 @@ vint64m8_t test_vwmulsu_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4
   return vwmulsu_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwredsum.c
index 98a434d145511..8b0ff2bcc5f63 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwredsum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwredsum.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf8_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv1i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vint16m1_t test_vwredsum_vs_i8mf8_i16m1(vint16m1_t dst, vint8mf8_t vector,
   return vwredsum_vs_i8mf8_i16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf4_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv2i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -26,7 +24,6 @@ vint16m1_t test_vwredsum_vs_i8mf4_i16m1(vint16m1_t dst, vint8mf4_t vector,
   return vwredsum_vs_i8mf4_i16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf2_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -37,7 +34,6 @@ vint16m1_t test_vwredsum_vs_i8mf2_i16m1(vint16m1_t dst, vint8mf2_t vector,
   return vwredsum_vs_i8mf2_i16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m1_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv8i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -48,7 +44,6 @@ vint16m1_t test_vwredsum_vs_i8m1_i16m1(vint16m1_t dst, vint8m1_t vector,
   return vwredsum_vs_i8m1_i16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m2_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv16i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -59,7 +54,6 @@ vint16m1_t test_vwredsum_vs_i8m2_i16m1(vint16m1_t dst, vint8m2_t vector,
   return vwredsum_vs_i8m2_i16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m4_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv32i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -70,7 +64,6 @@ vint16m1_t test_vwredsum_vs_i8m4_i16m1(vint16m1_t dst, vint8m4_t vector,
   return vwredsum_vs_i8m4_i16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m8_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv64i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -81,7 +74,6 @@ vint16m1_t test_vwredsum_vs_i8m8_i16m1(vint16m1_t dst, vint8m8_t vector,
   return vwredsum_vs_i8m8_i16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf4_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv1i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -92,7 +84,6 @@ vint32m1_t test_vwredsum_vs_i16mf4_i32m1(vint32m1_t dst, vint16mf4_t vector,
   return vwredsum_vs_i16mf4_i32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf2_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -103,7 +94,6 @@ vint32m1_t test_vwredsum_vs_i16mf2_i32m1(vint32m1_t dst, vint16mf2_t vector,
   return vwredsum_vs_i16mf2_i32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m1_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv4i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -114,7 +104,6 @@ vint32m1_t test_vwredsum_vs_i16m1_i32m1(vint32m1_t dst, vint16m1_t vector,
   return vwredsum_vs_i16m1_i32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m2_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv8i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -125,7 +114,6 @@ vint32m1_t test_vwredsum_vs_i16m2_i32m1(vint32m1_t dst, vint16m2_t vector,
   return vwredsum_vs_i16m2_i32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m4_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv16i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -136,7 +124,6 @@ vint32m1_t test_vwredsum_vs_i16m4_i32m1(vint32m1_t dst, vint16m4_t vector,
   return vwredsum_vs_i16m4_i32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m8_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv32i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -147,7 +134,6 @@ vint32m1_t test_vwredsum_vs_i16m8_i32m1(vint32m1_t dst, vint16m8_t vector,
   return vwredsum_vs_i16m8_i32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -158,7 +144,6 @@ vint64m1_t test_vwredsum_vs_i32mf2_i64m1(vint64m1_t dst, vint32mf2_t vector,
   return vwredsum_vs_i32mf2_i64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m1_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv2i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -169,7 +154,6 @@ vint64m1_t test_vwredsum_vs_i32m1_i64m1(vint64m1_t dst, vint32m1_t vector,
   return vwredsum_vs_i32m1_i64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m2_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv4i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -180,7 +164,6 @@ vint64m1_t test_vwredsum_vs_i32m2_i64m1(vint64m1_t dst, vint32m2_t vector,
   return vwredsum_vs_i32m2_i64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m4_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv8i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -191,7 +174,6 @@ vint64m1_t test_vwredsum_vs_i32m4_i64m1(vint64m1_t dst, vint32m4_t vector,
   return vwredsum_vs_i32m4_i64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m8_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv16i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -202,7 +184,6 @@ vint64m1_t test_vwredsum_vs_i32m8_i64m1(vint64m1_t dst, vint32m8_t vector,
   return vwredsum_vs_i32m8_i64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf8_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv1i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -213,7 +194,6 @@ vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1(vuint16m1_t dst, vuint8mf8_t vector,
   return vwredsumu_vs_u8mf8_u16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf4_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv2i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -224,7 +204,6 @@ vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1(vuint16m1_t dst, vuint8mf4_t vector,
   return vwredsumu_vs_u8mf4_u16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf2_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -235,7 +214,6 @@ vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1(vuint16m1_t dst, vuint8mf2_t vector,
   return vwredsumu_vs_u8mf2_u16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m1_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv8i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -246,7 +224,6 @@ vuint16m1_t test_vwredsumu_vs_u8m1_u16m1(vuint16m1_t dst, vuint8m1_t vector,
   return vwredsumu_vs_u8m1_u16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m2_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv16i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -257,7 +234,6 @@ vuint16m1_t test_vwredsumu_vs_u8m2_u16m1(vuint16m1_t dst, vuint8m2_t vector,
   return vwredsumu_vs_u8m2_u16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m4_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv32i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -268,7 +244,6 @@ vuint16m1_t test_vwredsumu_vs_u8m4_u16m1(vuint16m1_t dst, vuint8m4_t vector,
   return vwredsumu_vs_u8m4_u16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m8_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv64i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -279,7 +254,6 @@ vuint16m1_t test_vwredsumu_vs_u8m8_u16m1(vuint16m1_t dst, vuint8m8_t vector,
   return vwredsumu_vs_u8m8_u16m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf4_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv1i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -290,7 +264,6 @@ vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1(vuint32m1_t dst, vuint16mf4_t vector,
   return vwredsumu_vs_u16mf4_u32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf2_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -301,7 +274,6 @@ vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1(vuint32m1_t dst, vuint16mf2_t vector,
   return vwredsumu_vs_u16mf2_u32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m1_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv4i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -312,7 +284,6 @@ vuint32m1_t test_vwredsumu_vs_u16m1_u32m1(vuint32m1_t dst, vuint16m1_t vector,
   return vwredsumu_vs_u16m1_u32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m2_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv8i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -323,7 +294,6 @@ vuint32m1_t test_vwredsumu_vs_u16m2_u32m1(vuint32m1_t dst, vuint16m2_t vector,
   return vwredsumu_vs_u16m2_u32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m4_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv16i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -334,7 +304,6 @@ vuint32m1_t test_vwredsumu_vs_u16m4_u32m1(vuint32m1_t dst, vuint16m4_t vector,
   return vwredsumu_vs_u16m4_u32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m8_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv32i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -345,7 +314,6 @@ vuint32m1_t test_vwredsumu_vs_u16m8_u32m1(vuint32m1_t dst, vuint16m8_t vector,
   return vwredsumu_vs_u16m8_u32m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -356,7 +324,6 @@ vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1(vuint64m1_t dst, vuint32mf2_t vector,
   return vwredsumu_vs_u32mf2_u64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m1_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv2i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -367,7 +334,6 @@ vuint64m1_t test_vwredsumu_vs_u32m1_u64m1(vuint64m1_t dst, vuint32m1_t vector,
   return vwredsumu_vs_u32m1_u64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m2_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv4i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -378,7 +344,6 @@ vuint64m1_t test_vwredsumu_vs_u32m2_u64m1(vuint64m1_t dst, vuint32m2_t vector,
   return vwredsumu_vs_u32m2_u64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m4_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv8i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -389,7 +354,6 @@ vuint64m1_t test_vwredsumu_vs_u32m4_u64m1(vuint64m1_t dst, vuint32m4_t vector,
   return vwredsumu_vs_u32m4_u64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m8_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv16i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -400,7 +364,6 @@ vuint64m1_t test_vwredsumu_vs_u32m8_u64m1(vuint64m1_t dst, vuint32m8_t vector,
   return vwredsumu_vs_u32m8_u64m1(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf8_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv1i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -412,7 +375,6 @@ vint16m1_t test_vwredsum_vs_i8mf8_i16m1_m(vbool64_t mask, vint16m1_t dst,
   return vwredsum_vs_i8mf8_i16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf4_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv2i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -424,7 +386,6 @@ vint16m1_t test_vwredsum_vs_i8mf4_i16m1_m(vbool32_t mask, vint16m1_t dst,
   return vwredsum_vs_i8mf4_i16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf2_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -436,7 +397,6 @@ vint16m1_t test_vwredsum_vs_i8mf2_i16m1_m(vbool16_t mask, vint16m1_t dst,
   return vwredsum_vs_i8mf2_i16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m1_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv8i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -448,7 +408,6 @@ vint16m1_t test_vwredsum_vs_i8m1_i16m1_m(vbool8_t mask, vint16m1_t dst,
   return vwredsum_vs_i8m1_i16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m2_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv16i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -460,7 +419,6 @@ vint16m1_t test_vwredsum_vs_i8m2_i16m1_m(vbool4_t mask, vint16m1_t dst,
   return vwredsum_vs_i8m2_i16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m4_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv32i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -472,7 +430,6 @@ vint16m1_t test_vwredsum_vs_i8m4_i16m1_m(vbool2_t mask, vint16m1_t dst,
   return vwredsum_vs_i8m4_i16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m8_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv64i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -484,7 +441,6 @@ vint16m1_t test_vwredsum_vs_i8m8_i16m1_m(vbool1_t mask, vint16m1_t dst,
   return vwredsum_vs_i8m8_i16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf4_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv1i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -496,7 +452,6 @@ vint32m1_t test_vwredsum_vs_i16mf4_i32m1_m(vbool64_t mask, vint32m1_t dst,
   return vwredsum_vs_i16mf4_i32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf2_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -508,7 +463,6 @@ vint32m1_t test_vwredsum_vs_i16mf2_i32m1_m(vbool32_t mask, vint32m1_t dst,
   return vwredsum_vs_i16mf2_i32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m1_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv4i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -520,7 +474,6 @@ vint32m1_t test_vwredsum_vs_i16m1_i32m1_m(vbool16_t mask, vint32m1_t dst,
   return vwredsum_vs_i16m1_i32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m2_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv8i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -532,7 +485,6 @@ vint32m1_t test_vwredsum_vs_i16m2_i32m1_m(vbool8_t mask, vint32m1_t dst,
   return vwredsum_vs_i16m2_i32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m4_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv16i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -544,7 +496,6 @@ vint32m1_t test_vwredsum_vs_i16m4_i32m1_m(vbool4_t mask, vint32m1_t dst,
   return vwredsum_vs_i16m4_i32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m8_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv32i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -556,7 +507,6 @@ vint32m1_t test_vwredsum_vs_i16m8_i32m1_m(vbool2_t mask, vint32m1_t dst,
   return vwredsum_vs_i16m8_i32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -568,7 +518,6 @@ vint64m1_t test_vwredsum_vs_i32mf2_i64m1_m(vbool64_t mask, vint64m1_t dst,
   return vwredsum_vs_i32mf2_i64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m1_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv2i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -580,7 +529,6 @@ vint64m1_t test_vwredsum_vs_i32m1_i64m1_m(vbool32_t mask, vint64m1_t dst,
   return vwredsum_vs_i32m1_i64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m2_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv4i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -592,7 +540,6 @@ vint64m1_t test_vwredsum_vs_i32m2_i64m1_m(vbool16_t mask, vint64m1_t dst,
   return vwredsum_vs_i32m2_i64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m4_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv8i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -604,7 +551,6 @@ vint64m1_t test_vwredsum_vs_i32m4_i64m1_m(vbool8_t mask, vint64m1_t dst,
   return vwredsum_vs_i32m4_i64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m8_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv16i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -616,7 +562,6 @@ vint64m1_t test_vwredsum_vs_i32m8_i64m1_m(vbool4_t mask, vint64m1_t dst,
   return vwredsum_vs_i32m8_i64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf8_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv1i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -628,7 +573,6 @@ vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_m(vbool64_t mask, vuint16m1_t dst,
   return vwredsumu_vs_u8mf8_u16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf4_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv2i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -640,7 +584,6 @@ vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_m(vbool32_t mask, vuint16m1_t dst,
   return vwredsumu_vs_u8mf4_u16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf2_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -652,7 +595,6 @@ vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_m(vbool16_t mask, vuint16m1_t dst,
   return vwredsumu_vs_u8mf2_u16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m1_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv8i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -664,7 +606,6 @@ vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_m(vbool8_t mask, vuint16m1_t dst,
   return vwredsumu_vs_u8m1_u16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m2_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv16i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -676,7 +617,6 @@ vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_m(vbool4_t mask, vuint16m1_t dst,
   return vwredsumu_vs_u8m2_u16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m4_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv32i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -688,7 +628,6 @@ vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_m(vbool2_t mask, vuint16m1_t dst,
   return vwredsumu_vs_u8m4_u16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m8_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv64i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -700,7 +639,6 @@ vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_m(vbool1_t mask, vuint16m1_t dst,
   return vwredsumu_vs_u8m8_u16m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf4_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv1i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -712,7 +650,6 @@ vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_m(vbool64_t mask, vuint32m1_t dst,
   return vwredsumu_vs_u16mf4_u32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf2_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -724,7 +661,6 @@ vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_m(vbool32_t mask, vuint32m1_t dst,
   return vwredsumu_vs_u16mf2_u32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m1_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv4i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -736,7 +672,6 @@ vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_m(vbool16_t mask, vuint32m1_t dst,
   return vwredsumu_vs_u16m1_u32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m2_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv8i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -748,7 +683,6 @@ vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_m(vbool8_t mask, vuint32m1_t dst,
   return vwredsumu_vs_u16m2_u32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m4_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv16i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -760,7 +694,6 @@ vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_m(vbool4_t mask, vuint32m1_t dst,
   return vwredsumu_vs_u16m4_u32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m8_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv32i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -772,7 +705,6 @@ vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_m(vbool2_t mask, vuint32m1_t dst,
   return vwredsumu_vs_u16m8_u32m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -784,7 +716,6 @@ vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_m(vbool64_t mask, vuint64m1_t dst,
   return vwredsumu_vs_u32mf2_u64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m1_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv2i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -796,7 +727,6 @@ vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_m(vbool32_t mask, vuint64m1_t dst,
   return vwredsumu_vs_u32m1_u64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m2_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv4i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -808,7 +738,6 @@ vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_m(vbool16_t mask, vuint64m1_t dst,
   return vwredsumu_vs_u32m2_u64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m4_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv8i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -820,7 +749,6 @@ vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_m(vbool8_t mask, vuint64m1_t dst,
   return vwredsumu_vs_u32m4_u64m1_m(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m8_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv16i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwsub.c
index a4dff907a8b3f..aab1f15b73a03 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwsub.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint16mf4_t test_vwsub_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
   return vwsub_vv_i16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint16mf4_t test_vwsub_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) {
   return vwsub_vx_i16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint16mf4_t test_vwsub_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
   return vwsub_wv_i16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.i8.i64(<vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint16mf4_t test_vwsub_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) {
   return vwsub_wx_i16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint16mf2_t test_vwsub_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
   return vwsub_vv_i16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint16mf2_t test_vwsub_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) {
   return vwsub_vx_i16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vint16mf2_t test_vwsub_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
   return vwsub_wv_i16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.i8.i64(<vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vint16mf2_t test_vwsub_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) {
   return vwsub_wx_i16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vint16m1_t test_vwsub_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
   return vwsub_vv_i16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vint16m1_t test_vwsub_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) {
   return vwsub_vx_i16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vint16m1_t test_vwsub_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) {
   return vwsub_wv_i16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.i8.i64(<vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vint16m1_t test_vwsub_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) {
   return vwsub_wx_i16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vint16m2_t test_vwsub_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) {
   return vwsub_vv_i16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vint16m2_t test_vwsub_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) {
   return vwsub_vx_i16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vint16m2_t test_vwsub_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) {
   return vwsub_wv_i16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.i8.i64(<vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vint16m2_t test_vwsub_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) {
   return vwsub_wx_i16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vint16m4_t test_vwsub_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
   return vwsub_vv_i16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vint16m4_t test_vwsub_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) {
   return vwsub_vx_i16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vint16m4_t test_vwsub_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) {
   return vwsub_wv_i16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.i8.i64(<vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vint16m4_t test_vwsub_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) {
   return vwsub_wx_i16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vint16m8_t test_vwsub_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) {
   return vwsub_vv_i16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vint16m8_t test_vwsub_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) {
   return vwsub_vx_i16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vint16m8_t test_vwsub_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) {
   return vwsub_wv_i16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.i8.i64(<vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vint16m8_t test_vwsub_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) {
   return vwsub_wx_i16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vint32mf2_t test_vwsub_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
   return vwsub_vv_i32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vint32mf2_t test_vwsub_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) {
   return vwsub_vx_i32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vint32mf2_t test_vwsub_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) {
   return vwsub_wv_i32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.i16.i64(<vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vint32mf2_t test_vwsub_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) {
   return vwsub_wx_i32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vint32m1_t test_vwsub_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
   return vwsub_vv_i32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -304,7 +274,6 @@ vint32m1_t test_vwsub_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) {
   return vwsub_vx_i32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -314,7 +283,6 @@ vint32m1_t test_vwsub_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) {
   return vwsub_wv_i32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.i16.i64(<vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -324,7 +292,6 @@ vint32m1_t test_vwsub_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) {
   return vwsub_wx_i32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -334,7 +301,6 @@ vint32m2_t test_vwsub_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) {
   return vwsub_vv_i32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -344,7 +310,6 @@ vint32m2_t test_vwsub_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) {
   return vwsub_vx_i32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -354,7 +319,6 @@ vint32m2_t test_vwsub_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) {
   return vwsub_wv_i32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.i16.i64(<vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -364,7 +328,6 @@ vint32m2_t test_vwsub_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) {
   return vwsub_wx_i32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -374,7 +337,6 @@ vint32m4_t test_vwsub_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) {
   return vwsub_vv_i32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -384,7 +346,6 @@ vint32m4_t test_vwsub_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) {
   return vwsub_vx_i32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -394,7 +355,6 @@ vint32m4_t test_vwsub_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) {
   return vwsub_wv_i32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.i16.i64(<vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -404,7 +364,6 @@ vint32m4_t test_vwsub_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) {
   return vwsub_wx_i32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -414,7 +373,6 @@ vint32m8_t test_vwsub_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) {
   return vwsub_vv_i32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -424,7 +382,6 @@ vint32m8_t test_vwsub_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) {
   return vwsub_vx_i32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -434,7 +391,6 @@ vint32m8_t test_vwsub_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) {
   return vwsub_wv_i32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.nxv16i32.i16.i64(<vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -444,7 +400,6 @@ vint32m8_t test_vwsub_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) {
   return vwsub_wx_i32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -454,7 +409,6 @@ vint64m1_t test_vwsub_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
   return vwsub_vv_i64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -464,7 +418,6 @@ vint64m1_t test_vwsub_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) {
   return vwsub_vx_i64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -474,7 +427,6 @@ vint64m1_t test_vwsub_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) {
   return vwsub_wv_i64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.i32.i64(<vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -484,7 +436,6 @@ vint64m1_t test_vwsub_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) {
   return vwsub_wx_i64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -494,7 +445,6 @@ vint64m2_t test_vwsub_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) {
   return vwsub_vv_i64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -504,7 +454,6 @@ vint64m2_t test_vwsub_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) {
   return vwsub_vx_i64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -514,7 +463,6 @@ vint64m2_t test_vwsub_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) {
   return vwsub_wv_i64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.i32.i64(<vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -524,7 +472,6 @@ vint64m2_t test_vwsub_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) {
   return vwsub_wx_i64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -534,7 +481,6 @@ vint64m4_t test_vwsub_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) {
   return vwsub_vv_i64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -544,7 +490,6 @@ vint64m4_t test_vwsub_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) {
   return vwsub_vx_i64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -554,7 +499,6 @@ vint64m4_t test_vwsub_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) {
   return vwsub_wv_i64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.i32.i64(<vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -564,7 +508,6 @@ vint64m4_t test_vwsub_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) {
   return vwsub_wx_i64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -574,7 +517,6 @@ vint64m8_t test_vwsub_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
   return vwsub_vv_i64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -584,7 +526,6 @@ vint64m8_t test_vwsub_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) {
   return vwsub_vx_i64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -594,7 +535,6 @@ vint64m8_t test_vwsub_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) {
   return vwsub_wv_i64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.i32.i64(<vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -604,7 +544,6 @@ vint64m8_t test_vwsub_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) {
   return vwsub_wx_i64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -615,7 +554,6 @@ vuint16mf4_t test_vwsubu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2,
   return vwsubu_vv_u16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -625,7 +563,6 @@ vuint16mf4_t test_vwsubu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) {
   return vwsubu_vx_u16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -636,7 +573,6 @@ vuint16mf4_t test_vwsubu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2,
   return vwsubu_wv_u16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.i8.i64(<vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -646,7 +582,6 @@ vuint16mf4_t test_vwsubu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) {
   return vwsubu_wx_u16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -657,7 +592,6 @@ vuint16mf2_t test_vwsubu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2,
   return vwsubu_vv_u16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -667,7 +601,6 @@ vuint16mf2_t test_vwsubu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) {
   return vwsubu_vx_u16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -678,7 +611,6 @@ vuint16mf2_t test_vwsubu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2,
   return vwsubu_wv_u16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.i8.i64(<vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -688,7 +620,6 @@ vuint16mf2_t test_vwsubu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) {
   return vwsubu_wx_u16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -698,7 +629,6 @@ vuint16m1_t test_vwsubu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
   return vwsubu_vv_u16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -708,7 +638,6 @@ vuint16m1_t test_vwsubu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) {
   return vwsubu_vx_u16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -718,7 +647,6 @@ vuint16m1_t test_vwsubu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) {
   return vwsubu_wv_u16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.i8.i64(<vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -728,7 +656,6 @@ vuint16m1_t test_vwsubu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) {
   return vwsubu_wx_u16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -738,7 +665,6 @@ vuint16m2_t test_vwsubu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vwsubu_vv_u16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -748,7 +674,6 @@ vuint16m2_t test_vwsubu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) {
   return vwsubu_vx_u16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -758,7 +683,6 @@ vuint16m2_t test_vwsubu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) {
   return vwsubu_wv_u16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.i8.i64(<vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -768,7 +692,6 @@ vuint16m2_t test_vwsubu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) {
   return vwsubu_wx_u16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -778,7 +701,6 @@ vuint16m4_t test_vwsubu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vwsubu_vv_u16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -788,7 +710,6 @@ vuint16m4_t test_vwsubu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) {
   return vwsubu_vx_u16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -798,7 +719,6 @@ vuint16m4_t test_vwsubu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) {
   return vwsubu_wv_u16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.i8.i64(<vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -808,7 +728,6 @@ vuint16m4_t test_vwsubu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) {
   return vwsubu_wx_u16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -818,7 +737,6 @@ vuint16m8_t test_vwsubu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vwsubu_vv_u16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -828,7 +746,6 @@ vuint16m8_t test_vwsubu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) {
   return vwsubu_vx_u16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -838,7 +755,6 @@ vuint16m8_t test_vwsubu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) {
   return vwsubu_wv_u16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.i8.i64(<vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -848,7 +764,6 @@ vuint16m8_t test_vwsubu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) {
   return vwsubu_wx_u16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -859,7 +774,6 @@ vuint32mf2_t test_vwsubu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2,
   return vwsubu_vv_u32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -869,7 +783,6 @@ vuint32mf2_t test_vwsubu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) {
   return vwsubu_vx_u32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -880,7 +793,6 @@ vuint32mf2_t test_vwsubu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2,
   return vwsubu_wv_u32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.i16.i64(<vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -890,7 +802,6 @@ vuint32mf2_t test_vwsubu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) {
   return vwsubu_wx_u32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -901,7 +812,6 @@ vuint32m1_t test_vwsubu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2,
   return vwsubu_vv_u32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -911,7 +821,6 @@ vuint32m1_t test_vwsubu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) {
   return vwsubu_vx_u32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -921,7 +830,6 @@ vuint32m1_t test_vwsubu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) {
   return vwsubu_wv_u32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.i16.i64(<vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -931,7 +839,6 @@ vuint32m1_t test_vwsubu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) {
   return vwsubu_wx_u32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -941,7 +848,6 @@ vuint32m2_t test_vwsubu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
   return vwsubu_vv_u32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -951,7 +857,6 @@ vuint32m2_t test_vwsubu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) {
   return vwsubu_vx_u32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -961,7 +866,6 @@ vuint32m2_t test_vwsubu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) {
   return vwsubu_wv_u32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.i16.i64(<vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -971,7 +875,6 @@ vuint32m2_t test_vwsubu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) {
   return vwsubu_wx_u32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -981,7 +884,6 @@ vuint32m4_t test_vwsubu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vwsubu_vv_u32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -991,7 +893,6 @@ vuint32m4_t test_vwsubu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) {
   return vwsubu_vx_u32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1001,7 +902,6 @@ vuint32m4_t test_vwsubu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) {
   return vwsubu_wv_u32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.i16.i64(<vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1011,7 +911,6 @@ vuint32m4_t test_vwsubu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) {
   return vwsubu_wx_u32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1021,7 +920,6 @@ vuint32m8_t test_vwsubu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vwsubu_vv_u32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1031,7 +929,6 @@ vuint32m8_t test_vwsubu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) {
   return vwsubu_vx_u32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1041,7 +938,6 @@ vuint32m8_t test_vwsubu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) {
   return vwsubu_wv_u32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.nxv16i32.i16.i64(<vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1051,7 +947,6 @@ vuint32m8_t test_vwsubu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) {
   return vwsubu_wx_u32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1062,7 +957,6 @@ vuint64m1_t test_vwsubu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2,
   return vwsubu_vv_u64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1072,7 +966,6 @@ vuint64m1_t test_vwsubu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) {
   return vwsubu_vx_u64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1082,7 +975,6 @@ vuint64m1_t test_vwsubu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
   return vwsubu_wv_u64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.i32.i64(<vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1092,7 +984,6 @@ vuint64m1_t test_vwsubu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) {
   return vwsubu_wx_u64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1102,7 +993,6 @@ vuint64m2_t test_vwsubu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
   return vwsubu_vv_u64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1112,7 +1002,6 @@ vuint64m2_t test_vwsubu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) {
   return vwsubu_vx_u64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1122,7 +1011,6 @@ vuint64m2_t test_vwsubu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) {
   return vwsubu_wv_u64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.nxv2i64.i32.i64(<vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1132,7 +1020,6 @@ vuint64m2_t test_vwsubu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) {
   return vwsubu_wx_u64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1142,7 +1029,6 @@ vuint64m4_t test_vwsubu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
   return vwsubu_vv_u64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1152,7 +1038,6 @@ vuint64m4_t test_vwsubu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vwsubu_vx_u64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1162,7 +1047,6 @@ vuint64m4_t test_vwsubu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) {
   return vwsubu_wv_u64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.nxv4i64.i32.i64(<vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1172,7 +1056,6 @@ vuint64m4_t test_vwsubu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) {
   return vwsubu_wx_u64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1182,7 +1065,6 @@ vuint64m8_t test_vwsubu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vwsubu_vv_u64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1192,7 +1074,6 @@ vuint64m8_t test_vwsubu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vwsubu_vx_u64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1202,7 +1083,6 @@ vuint64m8_t test_vwsubu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) {
   return vwsubu_wv_u64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.nxv8i64.i32.i64(<vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1212,7 +1092,6 @@ vuint64m8_t test_vwsubu_wx_u64m8(vuint64m8_t op1, uint32_t op2, size_t vl) {
   return vwsubu_wx_u64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1223,7 +1102,6 @@ vint16mf4_t test_vwsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
   return vwsub_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1234,7 +1112,6 @@ vint16mf4_t test_vwsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
   return vwsub_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1245,7 +1122,6 @@ vint16mf4_t test_vwsub_wv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
   return vwsub_wv_i16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.mask.nxv1i16.i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1256,7 +1132,6 @@ vint16mf4_t test_vwsub_wx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
   return vwsub_wx_i16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1267,7 +1142,6 @@ vint16mf2_t test_vwsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
   return vwsub_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1278,7 +1152,6 @@ vint16mf2_t test_vwsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
   return vwsub_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1289,7 +1162,6 @@ vint16mf2_t test_vwsub_wv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
   return vwsub_wv_i16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.mask.nxv2i16.i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1300,7 +1172,6 @@ vint16mf2_t test_vwsub_wx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
   return vwsub_wx_i16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1311,7 +1182,6 @@ vint16m1_t test_vwsub_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
   return vwsub_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1322,7 +1192,6 @@ vint16m1_t test_vwsub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
   return vwsub_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1333,7 +1202,6 @@ vint16m1_t test_vwsub_wv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
   return vwsub_wv_i16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.mask.nxv4i16.i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1344,7 +1212,6 @@ vint16m1_t test_vwsub_wx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
   return vwsub_wx_i16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1355,7 +1222,6 @@ vint16m2_t test_vwsub_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
   return vwsub_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1366,7 +1232,6 @@ vint16m2_t test_vwsub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
   return vwsub_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1377,7 +1242,6 @@ vint16m2_t test_vwsub_wv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
   return vwsub_wv_i16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.mask.nxv8i16.i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1388,7 +1252,6 @@ vint16m2_t test_vwsub_wx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
   return vwsub_wx_i16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1399,7 +1262,6 @@ vint16m4_t test_vwsub_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
   return vwsub_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1410,7 +1272,6 @@ vint16m4_t test_vwsub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
   return vwsub_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1421,7 +1282,6 @@ vint16m4_t test_vwsub_wv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
   return vwsub_wv_i16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.mask.nxv16i16.i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1432,7 +1292,6 @@ vint16m4_t test_vwsub_wx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
   return vwsub_wx_i16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1443,7 +1302,6 @@ vint16m8_t test_vwsub_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
   return vwsub_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1454,7 +1312,6 @@ vint16m8_t test_vwsub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
   return vwsub_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1465,7 +1322,6 @@ vint16m8_t test_vwsub_wv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
   return vwsub_wv_i16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.mask.nxv32i16.i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1476,7 +1332,6 @@ vint16m8_t test_vwsub_wx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
   return vwsub_wx_i16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1488,7 +1343,6 @@ vint32mf2_t test_vwsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
   return vwsub_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1499,7 +1353,6 @@ vint32mf2_t test_vwsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
   return vwsub_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1511,7 +1364,6 @@ vint32mf2_t test_vwsub_wv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
   return vwsub_wv_i32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.mask.nxv1i32.i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1522,7 +1374,6 @@ vint32mf2_t test_vwsub_wx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
   return vwsub_wx_i32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1533,7 +1384,6 @@ vint32m1_t test_vwsub_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
   return vwsub_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1544,7 +1394,6 @@ vint32m1_t test_vwsub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
   return vwsub_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1555,7 +1404,6 @@ vint32m1_t test_vwsub_wv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
   return vwsub_wv_i32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.mask.nxv2i32.i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1566,7 +1414,6 @@ vint32m1_t test_vwsub_wx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
   return vwsub_wx_i32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1577,7 +1424,6 @@ vint32m2_t test_vwsub_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
   return vwsub_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1588,7 +1434,6 @@ vint32m2_t test_vwsub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
   return vwsub_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1599,7 +1444,6 @@ vint32m2_t test_vwsub_wv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
   return vwsub_wv_i32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.mask.nxv4i32.i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1610,7 +1454,6 @@ vint32m2_t test_vwsub_wx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
   return vwsub_wx_i32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1621,7 +1464,6 @@ vint32m4_t test_vwsub_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
   return vwsub_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1632,7 +1474,6 @@ vint32m4_t test_vwsub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
   return vwsub_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1643,7 +1484,6 @@ vint32m4_t test_vwsub_wv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
   return vwsub_wv_i32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.mask.nxv8i32.i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1654,7 +1494,6 @@ vint32m4_t test_vwsub_wx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
   return vwsub_wx_i32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1665,7 +1504,6 @@ vint32m8_t test_vwsub_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
   return vwsub_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1676,7 +1514,6 @@ vint32m8_t test_vwsub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
   return vwsub_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1687,7 +1524,6 @@ vint32m8_t test_vwsub_wv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
   return vwsub_wv_i32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.mask.nxv16i32.i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1698,7 +1534,6 @@ vint32m8_t test_vwsub_wx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
   return vwsub_wx_i32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1709,7 +1544,6 @@ vint64m1_t test_vwsub_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
   return vwsub_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1720,7 +1554,6 @@ vint64m1_t test_vwsub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
   return vwsub_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1731,7 +1564,6 @@ vint64m1_t test_vwsub_wv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
   return vwsub_wv_i64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.mask.nxv1i64.i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1742,7 +1574,6 @@ vint64m1_t test_vwsub_wx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
   return vwsub_wx_i64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1753,7 +1584,6 @@ vint64m2_t test_vwsub_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
   return vwsub_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1764,7 +1594,6 @@ vint64m2_t test_vwsub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
   return vwsub_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1775,7 +1604,6 @@ vint64m2_t test_vwsub_wv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
   return vwsub_wv_i64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.mask.nxv2i64.i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1786,7 +1614,6 @@ vint64m2_t test_vwsub_wx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
   return vwsub_wx_i64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1797,7 +1624,6 @@ vint64m4_t test_vwsub_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
   return vwsub_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1808,7 +1634,6 @@ vint64m4_t test_vwsub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
   return vwsub_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1819,7 +1644,6 @@ vint64m4_t test_vwsub_wv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
   return vwsub_wv_i64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.mask.nxv4i64.i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1830,7 +1654,6 @@ vint64m4_t test_vwsub_wx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
   return vwsub_wx_i64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1841,7 +1664,6 @@ vint64m8_t test_vwsub_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
   return vwsub_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1852,7 +1674,6 @@ vint64m8_t test_vwsub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
   return vwsub_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1863,7 +1684,6 @@ vint64m8_t test_vwsub_wv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
   return vwsub_wv_i64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.mask.nxv8i64.i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1874,7 +1694,6 @@ vint64m8_t test_vwsub_wx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
   return vwsub_wx_i64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1886,7 +1705,6 @@ vuint16mf4_t test_vwsubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
   return vwsubu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1897,7 +1715,6 @@ vuint16mf4_t test_vwsubu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
   return vwsubu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1909,7 +1726,6 @@ vuint16mf4_t test_vwsubu_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
   return vwsubu_wv_u16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.mask.nxv1i16.i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1920,7 +1736,6 @@ vuint16mf4_t test_vwsubu_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
   return vwsubu_wx_u16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1932,7 +1747,6 @@ vuint16mf2_t test_vwsubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
   return vwsubu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1943,7 +1757,6 @@ vuint16mf2_t test_vwsubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
   return vwsubu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1955,7 +1768,6 @@ vuint16mf2_t test_vwsubu_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
   return vwsubu_wv_u16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.mask.nxv2i16.i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1966,7 +1778,6 @@ vuint16mf2_t test_vwsubu_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
   return vwsubu_wx_u16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1978,7 +1789,6 @@ vuint16m1_t test_vwsubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
   return vwsubu_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1989,7 +1799,6 @@ vuint16m1_t test_vwsubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
   return vwsubu_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2001,7 +1810,6 @@ vuint16m1_t test_vwsubu_wv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
   return vwsubu_wv_u16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.mask.nxv4i16.i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2012,7 +1820,6 @@ vuint16m1_t test_vwsubu_wx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
   return vwsubu_wx_u16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2023,7 +1830,6 @@ vuint16m2_t test_vwsubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
   return vwsubu_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2034,7 +1840,6 @@ vuint16m2_t test_vwsubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
   return vwsubu_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2045,7 +1850,6 @@ vuint16m2_t test_vwsubu_wv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
   return vwsubu_wv_u16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.mask.nxv8i16.i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2056,7 +1860,6 @@ vuint16m2_t test_vwsubu_wx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
   return vwsubu_wx_u16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2067,7 +1870,6 @@ vuint16m4_t test_vwsubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
   return vwsubu_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2078,7 +1880,6 @@ vuint16m4_t test_vwsubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
   return vwsubu_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2089,7 +1890,6 @@ vuint16m4_t test_vwsubu_wv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
   return vwsubu_wv_u16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.mask.nxv16i16.i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2100,7 +1900,6 @@ vuint16m4_t test_vwsubu_wx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
   return vwsubu_wx_u16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2111,7 +1910,6 @@ vuint16m8_t test_vwsubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
   return vwsubu_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2122,7 +1920,6 @@ vuint16m8_t test_vwsubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
   return vwsubu_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2133,7 +1930,6 @@ vuint16m8_t test_vwsubu_wv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
   return vwsubu_wv_u16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.mask.nxv32i16.i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2144,7 +1940,6 @@ vuint16m8_t test_vwsubu_wx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
   return vwsubu_wx_u16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2156,7 +1951,6 @@ vuint32mf2_t test_vwsubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
   return vwsubu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2168,7 +1962,6 @@ vuint32mf2_t test_vwsubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
   return vwsubu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2180,7 +1973,6 @@ vuint32mf2_t test_vwsubu_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
   return vwsubu_wv_u32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.mask.nxv1i32.i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2192,7 +1984,6 @@ vuint32mf2_t test_vwsubu_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
   return vwsubu_wx_u32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2204,7 +1995,6 @@ vuint32m1_t test_vwsubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
   return vwsubu_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2215,7 +2005,6 @@ vuint32m1_t test_vwsubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
   return vwsubu_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2227,7 +2016,6 @@ vuint32m1_t test_vwsubu_wv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
   return vwsubu_wv_u32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.mask.nxv2i32.i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2238,7 +2026,6 @@ vuint32m1_t test_vwsubu_wx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
   return vwsubu_wx_u32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2250,7 +2037,6 @@ vuint32m2_t test_vwsubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
   return vwsubu_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2261,7 +2047,6 @@ vuint32m2_t test_vwsubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
   return vwsubu_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2273,7 +2058,6 @@ vuint32m2_t test_vwsubu_wv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
   return vwsubu_wv_u32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.mask.nxv4i32.i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2284,7 +2068,6 @@ vuint32m2_t test_vwsubu_wx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
   return vwsubu_wx_u32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2296,7 +2079,6 @@ vuint32m4_t test_vwsubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
   return vwsubu_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2307,7 +2089,6 @@ vuint32m4_t test_vwsubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
   return vwsubu_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2319,7 +2100,6 @@ vuint32m4_t test_vwsubu_wv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
   return vwsubu_wv_u32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.mask.nxv8i32.i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2330,7 +2110,6 @@ vuint32m4_t test_vwsubu_wx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
   return vwsubu_wx_u32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2342,7 +2121,6 @@ vuint32m8_t test_vwsubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
   return vwsubu_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2353,7 +2131,6 @@ vuint32m8_t test_vwsubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
   return vwsubu_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2365,7 +2142,6 @@ vuint32m8_t test_vwsubu_wv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
   return vwsubu_wv_u32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.mask.nxv16i32.i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2376,7 +2152,6 @@ vuint32m8_t test_vwsubu_wx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
   return vwsubu_wx_u32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2388,7 +2163,6 @@ vuint64m1_t test_vwsubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
   return vwsubu_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2399,7 +2173,6 @@ vuint64m1_t test_vwsubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
   return vwsubu_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2411,7 +2184,6 @@ vuint64m1_t test_vwsubu_wv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
   return vwsubu_wv_u64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.mask.nxv1i64.i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2422,7 +2194,6 @@ vuint64m1_t test_vwsubu_wx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
   return vwsubu_wx_u64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2434,7 +2205,6 @@ vuint64m2_t test_vwsubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
   return vwsubu_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2445,7 +2215,6 @@ vuint64m2_t test_vwsubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
   return vwsubu_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2457,7 +2226,6 @@ vuint64m2_t test_vwsubu_wv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
   return vwsubu_wv_u64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.mask.nxv2i64.i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2468,7 +2236,6 @@ vuint64m2_t test_vwsubu_wx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
   return vwsubu_wx_u64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2480,7 +2247,6 @@ vuint64m4_t test_vwsubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
   return vwsubu_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2491,7 +2257,6 @@ vuint64m4_t test_vwsubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
   return vwsubu_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2503,7 +2268,6 @@ vuint64m4_t test_vwsubu_wv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
   return vwsubu_wv_u64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.mask.nxv4i64.i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2514,7 +2278,6 @@ vuint64m4_t test_vwsubu_wx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
   return vwsubu_wx_u64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2526,7 +2289,6 @@ vuint64m8_t test_vwsubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
   return vwsubu_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2537,7 +2299,6 @@ vuint64m8_t test_vwsubu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
   return vwsubu_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2549,7 +2310,6 @@ vuint64m8_t test_vwsubu_wv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
   return vwsubu_wv_u64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.mask.nxv8i64.i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vxor.c
index 5ac79293a1979..83c999710058e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vxor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vxor.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint8mf8_t test_vxor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
   return vxor_vv_i8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint8mf8_t test_vxor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
   return vxor_vx_i8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint8mf4_t test_vxor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
   return vxor_vv_i8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint8mf4_t test_vxor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
   return vxor_vx_i8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint8mf2_t test_vxor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
   return vxor_vv_i8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint8mf2_t test_vxor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
   return vxor_vx_i8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vint8m1_t test_vxor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
   return vxor_vv_i8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vint8m1_t test_vxor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
   return vxor_vx_i8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vint8m2_t test_vxor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
   return vxor_vv_i8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vint8m2_t test_vxor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
   return vxor_vx_i8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vint8m4_t test_vxor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
   return vxor_vv_i8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vint8m4_t test_vxor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
   return vxor_vx_i8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vint8m8_t test_vxor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
   return vxor_vv_i8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vint8m8_t test_vxor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
   return vxor_vx_i8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vint16mf4_t test_vxor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
   return vxor_vv_i16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vint16mf4_t test_vxor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
   return vxor_vx_i16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vint16mf2_t test_vxor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
   return vxor_vv_i16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vint16mf2_t test_vxor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
   return vxor_vx_i16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vint16m1_t test_vxor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
   return vxor_vv_i16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vint16m1_t test_vxor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
   return vxor_vx_i16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vint16m2_t test_vxor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
   return vxor_vv_i16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vint16m2_t test_vxor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
   return vxor_vx_i16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vint16m4_t test_vxor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
   return vxor_vv_i16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vint16m4_t test_vxor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
   return vxor_vx_i16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vint16m8_t test_vxor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
   return vxor_vv_i16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vint16m8_t test_vxor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
   return vxor_vx_i16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vint32mf2_t test_vxor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
   return vxor_vv_i32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vint32mf2_t test_vxor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
   return vxor_vx_i32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vint32m1_t test_vxor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
   return vxor_vv_i32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -304,7 +274,6 @@ vint32m1_t test_vxor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
   return vxor_vx_i32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -314,7 +283,6 @@ vint32m2_t test_vxor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
   return vxor_vv_i32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -324,7 +292,6 @@ vint32m2_t test_vxor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
   return vxor_vx_i32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -334,7 +301,6 @@ vint32m4_t test_vxor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
   return vxor_vv_i32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -344,7 +310,6 @@ vint32m4_t test_vxor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
   return vxor_vx_i32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -354,7 +319,6 @@ vint32m8_t test_vxor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
   return vxor_vv_i32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -364,7 +328,6 @@ vint32m8_t test_vxor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
   return vxor_vx_i32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -374,7 +337,6 @@ vint64m1_t test_vxor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
   return vxor_vv_i64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -384,7 +346,6 @@ vint64m1_t test_vxor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
   return vxor_vx_i64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -394,7 +355,6 @@ vint64m2_t test_vxor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
   return vxor_vv_i64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -404,7 +364,6 @@ vint64m2_t test_vxor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
   return vxor_vx_i64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -414,7 +373,6 @@ vint64m4_t test_vxor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
   return vxor_vv_i64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -424,7 +382,6 @@ vint64m4_t test_vxor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
   return vxor_vx_i64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -434,7 +391,6 @@ vint64m8_t test_vxor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
   return vxor_vv_i64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -444,7 +400,6 @@ vint64m8_t test_vxor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
   return vxor_vx_i64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -454,7 +409,6 @@ vuint8mf8_t test_vxor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
   return vxor_vv_u8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -464,7 +418,6 @@ vuint8mf8_t test_vxor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
   return vxor_vx_u8mf8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -474,7 +427,6 @@ vuint8mf4_t test_vxor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
   return vxor_vv_u8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -484,7 +436,6 @@ vuint8mf4_t test_vxor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
   return vxor_vx_u8mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -494,7 +445,6 @@ vuint8mf2_t test_vxor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
   return vxor_vv_u8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -504,7 +454,6 @@ vuint8mf2_t test_vxor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
   return vxor_vx_u8mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -514,7 +463,6 @@ vuint8m1_t test_vxor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vxor_vv_u8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -524,7 +472,6 @@ vuint8m1_t test_vxor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
   return vxor_vx_u8m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -534,7 +481,6 @@ vuint8m2_t test_vxor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vxor_vv_u8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -544,7 +490,6 @@ vuint8m2_t test_vxor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
   return vxor_vx_u8m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -554,7 +499,6 @@ vuint8m4_t test_vxor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vxor_vv_u8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -564,7 +508,6 @@ vuint8m4_t test_vxor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
   return vxor_vx_u8m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -574,7 +517,6 @@ vuint8m8_t test_vxor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
   return vxor_vv_u8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -584,7 +526,6 @@ vuint8m8_t test_vxor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
   return vxor_vx_u8m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -594,7 +535,6 @@ vuint16mf4_t test_vxor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
   return vxor_vv_u16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -604,7 +544,6 @@ vuint16mf4_t test_vxor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
   return vxor_vx_u16mf4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -614,7 +553,6 @@ vuint16mf2_t test_vxor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
   return vxor_vv_u16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -624,7 +562,6 @@ vuint16mf2_t test_vxor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
   return vxor_vx_u16mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -634,7 +571,6 @@ vuint16m1_t test_vxor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
   return vxor_vv_u16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -644,7 +580,6 @@ vuint16m1_t test_vxor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
   return vxor_vx_u16m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -654,7 +589,6 @@ vuint16m2_t test_vxor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vxor_vv_u16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -664,7 +598,6 @@ vuint16m2_t test_vxor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
   return vxor_vx_u16m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -674,7 +607,6 @@ vuint16m4_t test_vxor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vxor_vv_u16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -684,7 +616,6 @@ vuint16m4_t test_vxor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
   return vxor_vx_u16m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -694,7 +625,6 @@ vuint16m8_t test_vxor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return vxor_vv_u16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -704,7 +634,6 @@ vuint16m8_t test_vxor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
   return vxor_vx_u16m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -714,7 +643,6 @@ vuint32mf2_t test_vxor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
   return vxor_vv_u32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -724,7 +652,6 @@ vuint32mf2_t test_vxor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
   return vxor_vx_u32mf2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -734,7 +661,6 @@ vuint32m1_t test_vxor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
   return vxor_vv_u32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -744,7 +670,6 @@ vuint32m1_t test_vxor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
   return vxor_vx_u32m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -754,7 +679,6 @@ vuint32m2_t test_vxor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
   return vxor_vv_u32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -764,7 +688,6 @@ vuint32m2_t test_vxor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vxor_vx_u32m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -774,7 +697,6 @@ vuint32m4_t test_vxor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vxor_vv_u32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -784,7 +706,6 @@ vuint32m4_t test_vxor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vxor_vx_u32m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -794,7 +715,6 @@ vuint32m8_t test_vxor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
   return vxor_vv_u32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -804,7 +724,6 @@ vuint32m8_t test_vxor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
   return vxor_vx_u32m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -814,7 +733,6 @@ vuint64m1_t test_vxor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
   return vxor_vv_u64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -824,7 +742,6 @@ vuint64m1_t test_vxor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
   return vxor_vx_u64m1(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -834,7 +751,6 @@ vuint64m2_t test_vxor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
   return vxor_vv_u64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -844,7 +760,6 @@ vuint64m2_t test_vxor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
   return vxor_vx_u64m2(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -854,7 +769,6 @@ vuint64m4_t test_vxor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
   return vxor_vv_u64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -864,7 +778,6 @@ vuint64m4_t test_vxor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
   return vxor_vx_u64m4(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -874,7 +787,6 @@ vuint64m8_t test_vxor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
   return vxor_vv_u64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -884,7 +796,6 @@ vuint64m8_t test_vxor_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
   return vxor_vx_u64m8(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -894,7 +805,6 @@ vint8mf8_t test_vxor_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t
   return vxor_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -904,7 +814,6 @@ vint8mf8_t test_vxor_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t
   return vxor_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -914,7 +823,6 @@ vint8mf4_t test_vxor_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t
   return vxor_vv_i8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -924,7 +832,6 @@ vint8mf4_t test_vxor_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t
   return vxor_vx_i8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -934,7 +841,6 @@ vint8mf2_t test_vxor_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t
   return vxor_vv_i8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -944,7 +850,6 @@ vint8mf2_t test_vxor_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t
   return vxor_vx_i8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -954,7 +859,6 @@ vint8m1_t test_vxor_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1,
   return vxor_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -964,7 +868,6 @@ vint8m1_t test_vxor_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1,
   return vxor_vx_i8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -974,7 +877,6 @@ vint8m2_t test_vxor_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1,
   return vxor_vv_i8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -984,7 +886,6 @@ vint8m2_t test_vxor_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1,
   return vxor_vx_i8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -994,7 +895,6 @@ vint8m4_t test_vxor_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1,
   return vxor_vv_i8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1004,7 +904,6 @@ vint8m4_t test_vxor_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1,
   return vxor_vx_i8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1014,7 +913,6 @@ vint8m8_t test_vxor_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1,
   return vxor_vv_i8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1024,7 +922,6 @@ vint8m8_t test_vxor_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1,
   return vxor_vx_i8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1034,7 +931,6 @@ vint16mf4_t test_vxor_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16m
   return vxor_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1044,7 +940,6 @@ vint16mf4_t test_vxor_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16m
   return vxor_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1054,7 +949,6 @@ vint16mf2_t test_vxor_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16m
   return vxor_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1064,7 +958,6 @@ vint16mf2_t test_vxor_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16m
   return vxor_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1074,7 +967,6 @@ vint16m1_t test_vxor_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t
   return vxor_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1084,7 +976,6 @@ vint16m1_t test_vxor_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t
   return vxor_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1094,7 +985,6 @@ vint16m2_t test_vxor_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
   return vxor_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1104,7 +994,6 @@ vint16m2_t test_vxor_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
   return vxor_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1114,7 +1003,6 @@ vint16m4_t test_vxor_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
   return vxor_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1124,7 +1012,6 @@ vint16m4_t test_vxor_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
   return vxor_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1134,7 +1021,6 @@ vint16m8_t test_vxor_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
   return vxor_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1144,7 +1030,6 @@ vint16m8_t test_vxor_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
   return vxor_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1154,7 +1039,6 @@ vint32mf2_t test_vxor_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32m
   return vxor_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1164,7 +1048,6 @@ vint32mf2_t test_vxor_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32m
   return vxor_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1174,7 +1057,6 @@ vint32m1_t test_vxor_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t
   return vxor_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1184,7 +1066,6 @@ vint32m1_t test_vxor_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t
   return vxor_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1194,7 +1075,6 @@ vint32m2_t test_vxor_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t
   return vxor_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1204,7 +1084,6 @@ vint32m2_t test_vxor_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t
   return vxor_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1214,7 +1093,6 @@ vint32m4_t test_vxor_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
   return vxor_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1224,7 +1102,6 @@ vint32m4_t test_vxor_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
   return vxor_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1234,7 +1111,6 @@ vint32m8_t test_vxor_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
   return vxor_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1244,7 +1120,6 @@ vint32m8_t test_vxor_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
   return vxor_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1254,7 +1129,6 @@ vint64m1_t test_vxor_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t
   return vxor_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1264,7 +1138,6 @@ vint64m1_t test_vxor_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t
   return vxor_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1274,7 +1147,6 @@ vint64m2_t test_vxor_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t
   return vxor_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1284,7 +1156,6 @@ vint64m2_t test_vxor_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t
   return vxor_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1294,7 +1165,6 @@ vint64m4_t test_vxor_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t
   return vxor_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1304,7 +1174,6 @@ vint64m4_t test_vxor_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t
   return vxor_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1314,7 +1183,6 @@ vint64m8_t test_vxor_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
   return vxor_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1324,7 +1192,6 @@ vint64m8_t test_vxor_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
   return vxor_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1334,7 +1201,6 @@ vuint8mf8_t test_vxor_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf
   return vxor_vv_u8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1344,7 +1210,6 @@ vuint8mf8_t test_vxor_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf
   return vxor_vx_u8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1354,7 +1219,6 @@ vuint8mf4_t test_vxor_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf
   return vxor_vv_u8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1364,7 +1228,6 @@ vuint8mf4_t test_vxor_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf
   return vxor_vx_u8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1374,7 +1237,6 @@ vuint8mf2_t test_vxor_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf
   return vxor_vv_u8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1384,7 +1246,6 @@ vuint8mf2_t test_vxor_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf
   return vxor_vx_u8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1394,7 +1255,6 @@ vuint8m1_t test_vxor_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t o
   return vxor_vv_u8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1404,7 +1264,6 @@ vuint8m1_t test_vxor_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t o
   return vxor_vx_u8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1414,7 +1273,6 @@ vuint8m2_t test_vxor_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t o
   return vxor_vv_u8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1424,7 +1282,6 @@ vuint8m2_t test_vxor_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t o
   return vxor_vx_u8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1434,7 +1291,6 @@ vuint8m4_t test_vxor_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t o
   return vxor_vv_u8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1444,7 +1300,6 @@ vuint8m4_t test_vxor_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t o
   return vxor_vx_u8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1454,7 +1309,6 @@ vuint8m8_t test_vxor_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t o
   return vxor_vv_u8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1464,7 +1318,6 @@ vuint8m8_t test_vxor_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t o
   return vxor_vx_u8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1474,7 +1327,6 @@ vuint16mf4_t test_vxor_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint
   return vxor_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1484,7 +1336,6 @@ vuint16mf4_t test_vxor_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint
   return vxor_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1494,7 +1345,6 @@ vuint16mf2_t test_vxor_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint
   return vxor_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1504,7 +1354,6 @@ vuint16mf2_t test_vxor_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint
   return vxor_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1514,7 +1363,6 @@ vuint16m1_t test_vxor_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m
   return vxor_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1524,7 +1372,6 @@ vuint16m1_t test_vxor_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m
   return vxor_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1534,7 +1381,6 @@ vuint16m2_t test_vxor_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2
   return vxor_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1544,7 +1390,6 @@ vuint16m2_t test_vxor_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2
   return vxor_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1554,7 +1399,6 @@ vuint16m4_t test_vxor_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4
   return vxor_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1564,7 +1408,6 @@ vuint16m4_t test_vxor_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4
   return vxor_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1574,7 +1417,6 @@ vuint16m8_t test_vxor_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8
   return vxor_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1584,7 +1426,6 @@ vuint16m8_t test_vxor_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8
   return vxor_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1594,7 +1435,6 @@ vuint32mf2_t test_vxor_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint
   return vxor_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1604,7 +1444,6 @@ vuint32mf2_t test_vxor_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint
   return vxor_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1614,7 +1453,6 @@ vuint32m1_t test_vxor_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m
   return vxor_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1624,7 +1462,6 @@ vuint32m1_t test_vxor_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m
   return vxor_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1634,7 +1471,6 @@ vuint32m2_t test_vxor_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m
   return vxor_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1644,7 +1480,6 @@ vuint32m2_t test_vxor_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m
   return vxor_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1654,7 +1489,6 @@ vuint32m4_t test_vxor_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4
   return vxor_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1664,7 +1498,6 @@ vuint32m4_t test_vxor_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4
   return vxor_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1674,7 +1507,6 @@ vuint32m8_t test_vxor_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8
   return vxor_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1684,7 +1516,6 @@ vuint32m8_t test_vxor_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8
   return vxor_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1694,7 +1525,6 @@ vuint64m1_t test_vxor_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m
   return vxor_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1704,7 +1534,6 @@ vuint64m1_t test_vxor_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m
   return vxor_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1714,7 +1543,6 @@ vuint64m2_t test_vxor_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m
   return vxor_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1724,7 +1552,6 @@ vuint64m2_t test_vxor_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m
   return vxor_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1734,7 +1561,6 @@ vuint64m4_t test_vxor_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m
   return vxor_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1744,7 +1570,6 @@ vuint64m4_t test_vxor_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m
   return vxor_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1754,7 +1579,6 @@ vuint64m8_t test_vxor_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8
   return vxor_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vzext.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vzext.c
index e26cd9e28ba23..234ef15bc9a2a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vzext.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vzext.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf2_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vzext.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vuint16mf4_t test_vzext_vf2_u16mf4(vuint8mf8_t op1, size_t vl) {
   return vzext_vf2_u16mf4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf2_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vzext.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vuint16mf2_t test_vzext_vf2_u16mf2(vuint8mf4_t op1, size_t vl) {
   return vzext_vf2_u16mf2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf2_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vzext.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vuint16m1_t test_vzext_vf2_u16m1(vuint8mf2_t op1, size_t vl) {
   return vzext_vf2_u16m1(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf2_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vzext.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vuint16m2_t test_vzext_vf2_u16m2(vuint8m1_t op1, size_t vl) {
   return vzext_vf2_u16m2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf2_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vzext.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vuint16m4_t test_vzext_vf2_u16m4(vuint8m2_t op1, size_t vl) {
   return vzext_vf2_u16m4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf2_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vzext.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vuint16m8_t test_vzext_vf2_u16m8(vuint8m4_t op1, size_t vl) {
   return vzext_vf2_u16m8(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vuint32mf2_t test_vzext_vf4_u32mf2(vuint8mf8_t op1, size_t vl) {
   return vzext_vf4_u32mf2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf4_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vuint32m1_t test_vzext_vf4_u32m1(vuint8mf4_t op1, size_t vl) {
   return vzext_vf4_u32m1(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf4_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vuint32m2_t test_vzext_vf4_u32m2(vuint8mf2_t op1, size_t vl) {
   return vzext_vf4_u32m2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf4_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vuint32m4_t test_vzext_vf4_u32m4(vuint8m1_t op1, size_t vl) {
   return vzext_vf4_u32m4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf4_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vuint32m8_t test_vzext_vf4_u32m8(vuint8m2_t op1, size_t vl) {
   return vzext_vf4_u32m8(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf8_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vuint64m1_t test_vzext_vf8_u64m1(vuint8mf8_t op1, size_t vl) {
   return vzext_vf8_u64m1(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf8_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vuint64m2_t test_vzext_vf8_u64m2(vuint8mf4_t op1, size_t vl) {
   return vzext_vf8_u64m2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf8_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vuint64m4_t test_vzext_vf8_u64m4(vuint8mf2_t op1, size_t vl) {
   return vzext_vf8_u64m4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf8_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vuint64m8_t test_vzext_vf8_u64m8(vuint8m1_t op1, size_t vl) {
   return vzext_vf8_u64m8(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf2_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vuint32mf2_t test_vzext_vf2_u32mf2(vuint16mf4_t op1, size_t vl) {
   return vzext_vf2_u32mf2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf2_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vuint32m1_t test_vzext_vf2_u32m1(vuint16mf2_t op1, size_t vl) {
   return vzext_vf2_u32m1(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf2_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vuint32m2_t test_vzext_vf2_u32m2(vuint16m1_t op1, size_t vl) {
   return vzext_vf2_u32m2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf2_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vuint32m4_t test_vzext_vf2_u32m4(vuint16m2_t op1, size_t vl) {
   return vzext_vf2_u32m4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf2_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vuint32m8_t test_vzext_vf2_u32m8(vuint16m4_t op1, size_t vl) {
   return vzext_vf2_u32m8(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf4_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vuint64m1_t test_vzext_vf4_u64m1(vuint16mf4_t op1, size_t vl) {
   return vzext_vf4_u64m1(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf4_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vuint64m2_t test_vzext_vf4_u64m2(vuint16mf2_t op1, size_t vl) {
   return vzext_vf4_u64m2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf4_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vuint64m4_t test_vzext_vf4_u64m4(vuint16m1_t op1, size_t vl) {
   return vzext_vf4_u64m4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf4_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vuint64m8_t test_vzext_vf4_u64m8(vuint16m2_t op1, size_t vl) {
   return vzext_vf4_u64m8(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf2_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vuint64m1_t test_vzext_vf2_u64m1(vuint32mf2_t op1, size_t vl) {
   return vzext_vf2_u64m1(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf2_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vuint64m2_t test_vzext_vf2_u64m2(vuint32m1_t op1, size_t vl) {
   return vzext_vf2_u64m2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf2_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vuint64m4_t test_vzext_vf2_u64m4(vuint32m2_t op1, size_t vl) {
   return vzext_vf2_u64m4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf2_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vuint64m8_t test_vzext_vf2_u64m8(vuint32m4_t op1, size_t vl) {
   return vzext_vf2_u64m8(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf2_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vzext.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -295,7 +266,6 @@ vuint16mf4_t test_vzext_vf2_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
   return vzext_vf2_u16mf4_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf2_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vzext.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -306,7 +276,6 @@ vuint16mf2_t test_vzext_vf2_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
   return vzext_vf2_u16mf2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf2_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vzext.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -317,7 +286,6 @@ vuint16m1_t test_vzext_vf2_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
   return vzext_vf2_u16m1_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf2_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vzext.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -328,7 +296,6 @@ vuint16m2_t test_vzext_vf2_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
   return vzext_vf2_u16m2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf2_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vzext.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -339,7 +306,6 @@ vuint16m4_t test_vzext_vf2_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
   return vzext_vf2_u16m4_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf2_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vzext.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -350,7 +316,6 @@ vuint16m8_t test_vzext_vf2_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
   return vzext_vf2_u16m8_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -361,7 +326,6 @@ vuint32mf2_t test_vzext_vf4_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
   return vzext_vf4_u32mf2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf4_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -372,7 +336,6 @@ vuint32m1_t test_vzext_vf4_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
   return vzext_vf4_u32m1_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf4_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -383,7 +346,6 @@ vuint32m2_t test_vzext_vf4_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
   return vzext_vf4_u32m2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf4_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -394,7 +356,6 @@ vuint32m4_t test_vzext_vf4_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
   return vzext_vf4_u32m4_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf4_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -405,7 +366,6 @@ vuint32m8_t test_vzext_vf4_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
   return vzext_vf4_u32m8_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf8_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -416,7 +376,6 @@ vuint64m1_t test_vzext_vf8_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
   return vzext_vf8_u64m1_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf8_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -427,7 +386,6 @@ vuint64m2_t test_vzext_vf8_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
   return vzext_vf8_u64m2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf8_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -438,7 +396,6 @@ vuint64m4_t test_vzext_vf8_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
   return vzext_vf8_u64m4_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf8_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -449,7 +406,6 @@ vuint64m8_t test_vzext_vf8_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
   return vzext_vf8_u64m8_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf2_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -460,7 +416,6 @@ vuint32mf2_t test_vzext_vf2_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
   return vzext_vf2_u32mf2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf2_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -471,7 +426,6 @@ vuint32m1_t test_vzext_vf2_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
   return vzext_vf2_u32m1_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf2_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -482,7 +436,6 @@ vuint32m2_t test_vzext_vf2_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
   return vzext_vf2_u32m2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf2_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -493,7 +446,6 @@ vuint32m4_t test_vzext_vf2_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
   return vzext_vf2_u32m4_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf2_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -504,7 +456,6 @@ vuint32m8_t test_vzext_vf2_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
   return vzext_vf2_u32m8_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf4_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -515,7 +466,6 @@ vuint64m1_t test_vzext_vf4_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
   return vzext_vf4_u64m1_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf4_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -526,7 +476,6 @@ vuint64m2_t test_vzext_vf4_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
   return vzext_vf4_u64m2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf4_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -537,7 +486,6 @@ vuint64m4_t test_vzext_vf4_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
   return vzext_vf4_u64m4_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf4_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -548,7 +496,6 @@ vuint64m8_t test_vzext_vf4_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
   return vzext_vf4_u64m8_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf2_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -559,7 +506,6 @@ vuint64m1_t test_vzext_vf2_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
   return vzext_vf2_u64m1_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf2_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -570,7 +516,6 @@ vuint64m2_t test_vzext_vf2_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
   return vzext_vf2_u64m2_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf2_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -581,7 +526,6 @@ vuint64m4_t test_vzext_vf2_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
   return vzext_vf2_u64m4_m(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf2_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])


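For context, every entry in these auto-generated tests has the same shape: a block of FileCheck comments sitting directly above the C function that exercises a single RVV intrinsic, and the deleted "//" lines were purely decorative separators between such entries. As a minimal sketch, reassembled from the vzext.c hunks above (the CHECK lines that the diff context elides are abbreviated to "// ..."), one entry now reads:

  #include <riscv_vector.h>

  // CHECK-RV64-LABEL: @test_vzext_vf2_u16mf4(
  // CHECK-RV64-NEXT:  entry:
  // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vzext.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
  // ...
  vuint16mf4_t test_vzext_vf2_u16mf4(vuint8mf8_t op1, size_t vl) {
    return vzext_vf2_u16mf4(op1, vl);
  }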