[clang] 1d8bbaf - [RISCV][NFC] Fix clang test for vloxei/vluxei

Shao-Ce SUN via cfe-commits cfe-commits at lists.llvm.org
Tue Sep 21 20:27:53 PDT 2021


Author: Shao-Ce SUN
Date: 2021-09-22T11:27:41+08:00
New Revision: 1d8bbafed202cfae5ce31bea36072490f3d07960

URL: https://github.com/llvm/llvm-project/commit/1d8bbafed202cfae5ce31bea36072490f3d07960
DIFF: https://github.com/llvm/llvm-project/commit/1d8bbafed202cfae5ce31bea36072490f3d07960.diff

LOG: [RISCV][NFC] Fix clang test for vloxei/vluxei
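
The CHECK-RV64-LABEL lines in these tests referred to labels of the form
@testuxei<N>_v_<type>, which do not match the names of the functions under
test (test_vloxei<N>_v_<type> in vloxei.c and test_vluxei<N>_v_<type> in
vluxei.c); this patch updates the labels accordingly. As a rough sketch,
each test case follows the pattern below (reconstructed from the first hunk
of vloxei.c; the RUN lines at the top of the file are omitted here):

  #include <riscv_vector.h>

  // The label must name the function the test actually emits,
  // i.e. test_vloxei8_v_i8mf8 rather than the old testuxei8_v_i8mf8.
  // CHECK-RV64-LABEL: @test_vloxei8_v_i8mf8(
  vint8mf8_t test_vloxei8_v_i8mf8(const int8_t *base, vuint8mf8_t bindex,
                                  size_t vl) {
    return vloxei8_v_i8mf8(base, bindex, vl);
  }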

Added: 
    

Modified: 
    clang/test/CodeGen/RISCV/rvv-intrinsics/vloxei.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vluxei.c

Removed: 
    


################################################################################
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxei.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxei.c
index 132d855e38665..92d672dcac1a1 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxei.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxei.c
@@ -6,7 +6,7 @@
 
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: @testuxei8_v_i8mf8(
+// CHECK-RV64-LABEL: @test_vloxei8_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i8.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -16,7 +16,7 @@ vint8mf8_t test_vloxei8_v_i8mf8(const int8_t *base, vuint8mf8_t bindex, size_t v
   return vloxei8_v_i8mf8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i8mf4(
+// CHECK-RV64-LABEL: @test_vloxei8_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i8.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -26,7 +26,7 @@ vint8mf4_t test_vloxei8_v_i8mf4(const int8_t *base, vuint8mf4_t bindex, size_t v
   return vloxei8_v_i8mf4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i8mf2(
+// CHECK-RV64-LABEL: @test_vloxei8_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i8.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -36,7 +36,7 @@ vint8mf2_t test_vloxei8_v_i8mf2(const int8_t *base, vuint8mf2_t bindex, size_t v
   return vloxei8_v_i8mf2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i8m1(
+// CHECK-RV64-LABEL: @test_vloxei8_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i8.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -46,7 +46,7 @@ vint8m1_t test_vloxei8_v_i8m1(const int8_t *base, vuint8m1_t bindex, size_t vl)
   return vloxei8_v_i8m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i8m2(
+// CHECK-RV64-LABEL: @test_vloxei8_v_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i8.i64(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -56,7 +56,7 @@ vint8m2_t test_vloxei8_v_i8m2(const int8_t *base, vuint8m2_t bindex, size_t vl)
   return vloxei8_v_i8m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i8m4(
+// CHECK-RV64-LABEL: @test_vloxei8_v_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i8.i64(<vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -66,7 +66,7 @@ vint8m4_t test_vloxei8_v_i8m4(const int8_t *base, vuint8m4_t bindex, size_t vl)
   return vloxei8_v_i8m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i8m8(
+// CHECK-RV64-LABEL: @test_vloxei8_v_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vloxei.nxv64i8.nxv64i8.i64(<vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -76,7 +76,7 @@ vint8m8_t test_vloxei8_v_i8m8(const int8_t *base, vuint8m8_t bindex, size_t vl)
   return vloxei8_v_i8m8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i8mf8(
+// CHECK-RV64-LABEL: @test_vloxei16_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i16.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -86,7 +86,7 @@ vint8mf8_t test_vloxei16_v_i8mf8(const int8_t *base, vuint16mf4_t bindex, size_t
   return vloxei16_v_i8mf8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i8mf4(
+// CHECK-RV64-LABEL: @test_vloxei16_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i16.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -96,7 +96,7 @@ vint8mf4_t test_vloxei16_v_i8mf4(const int8_t *base, vuint16mf2_t bindex, size_t
   return vloxei16_v_i8mf4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i8mf2(
+// CHECK-RV64-LABEL: @test_vloxei16_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i16.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -106,7 +106,7 @@ vint8mf2_t test_vloxei16_v_i8mf2(const int8_t *base, vuint16m1_t bindex, size_t
   return vloxei16_v_i8mf2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i8m1(
+// CHECK-RV64-LABEL: @test_vloxei16_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i16.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -116,7 +116,7 @@ vint8m1_t test_vloxei16_v_i8m1(const int8_t *base, vuint16m2_t bindex, size_t vl
   return vloxei16_v_i8m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i8m2(
+// CHECK-RV64-LABEL: @test_vloxei16_v_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i16.i64(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -126,7 +126,7 @@ vint8m2_t test_vloxei16_v_i8m2(const int8_t *base, vuint16m4_t bindex, size_t vl
   return vloxei16_v_i8m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i8m4(
+// CHECK-RV64-LABEL: @test_vloxei16_v_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i16.i64(<vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -136,7 +136,7 @@ vint8m4_t test_vloxei16_v_i8m4(const int8_t *base, vuint16m8_t bindex, size_t vl
   return vloxei16_v_i8m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i8mf8(
+// CHECK-RV64-LABEL: @test_vloxei32_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i32.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -146,7 +146,7 @@ vint8mf8_t test_vloxei32_v_i8mf8(const int8_t *base, vuint32mf2_t bindex, size_t
   return vloxei32_v_i8mf8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i8mf4(
+// CHECK-RV64-LABEL: @test_vloxei32_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i32.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -156,7 +156,7 @@ vint8mf4_t test_vloxei32_v_i8mf4(const int8_t *base, vuint32m1_t bindex, size_t
   return vloxei32_v_i8mf4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i8mf2(
+// CHECK-RV64-LABEL: @test_vloxei32_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i32.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -166,7 +166,7 @@ vint8mf2_t test_vloxei32_v_i8mf2(const int8_t *base, vuint32m2_t bindex, size_t
   return vloxei32_v_i8mf2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i8m1(
+// CHECK-RV64-LABEL: @test_vloxei32_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i32.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -176,7 +176,7 @@ vint8m1_t test_vloxei32_v_i8m1(const int8_t *base, vuint32m4_t bindex, size_t vl
   return vloxei32_v_i8m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i8m2(
+// CHECK-RV64-LABEL: @test_vloxei32_v_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i32.i64(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -186,7 +186,7 @@ vint8m2_t test_vloxei32_v_i8m2(const int8_t *base, vuint32m8_t bindex, size_t vl
   return vloxei32_v_i8m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i8mf8(
+// CHECK-RV64-LABEL: @test_vloxei64_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i64.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -196,7 +196,7 @@ vint8mf8_t test_vloxei64_v_i8mf8(const int8_t *base, vuint64m1_t bindex, size_t
   return vloxei64_v_i8mf8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i8mf4(
+// CHECK-RV64-LABEL: @test_vloxei64_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i64.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -206,7 +206,7 @@ vint8mf4_t test_vloxei64_v_i8mf4(const int8_t *base, vuint64m2_t bindex, size_t
   return vloxei64_v_i8mf4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i8mf2(
+// CHECK-RV64-LABEL: @test_vloxei64_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i64.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -216,7 +216,7 @@ vint8mf2_t test_vloxei64_v_i8mf2(const int8_t *base, vuint64m4_t bindex, size_t
   return vloxei64_v_i8mf2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i8m1(
+// CHECK-RV64-LABEL: @test_vloxei64_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i64.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -226,7 +226,7 @@ vint8m1_t test_vloxei64_v_i8m1(const int8_t *base, vuint64m8_t bindex, size_t vl
   return vloxei64_v_i8m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vloxei8_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i8.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -236,7 +236,7 @@ vint16mf4_t test_vloxei8_v_i16mf4(const int16_t *base, vuint8mf8_t bindex, size_
   return vloxei8_v_i16mf4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i16mf2(
+// CHECK-RV64-LABEL: @test_vloxei8_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i8.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -246,7 +246,7 @@ vint16mf2_t test_vloxei8_v_i16mf2(const int16_t *base, vuint8mf4_t bindex, size_
   return vloxei8_v_i16mf2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i16m1(
+// CHECK-RV64-LABEL: @test_vloxei8_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i8.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -256,7 +256,7 @@ vint16m1_t test_vloxei8_v_i16m1(const int16_t *base, vuint8mf2_t bindex, size_t
   return vloxei8_v_i16m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i16m2(
+// CHECK-RV64-LABEL: @test_vloxei8_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i8.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -266,7 +266,7 @@ vint16m2_t test_vloxei8_v_i16m2(const int16_t *base, vuint8m1_t bindex, size_t v
   return vloxei8_v_i16m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i16m4(
+// CHECK-RV64-LABEL: @test_vloxei8_v_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i8.i64(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -276,7 +276,7 @@ vint16m4_t test_vloxei8_v_i16m4(const int16_t *base, vuint8m2_t bindex, size_t v
   return vloxei8_v_i16m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i16m8(
+// CHECK-RV64-LABEL: @test_vloxei8_v_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i8.i64(<vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -286,7 +286,7 @@ vint16m8_t test_vloxei8_v_i16m8(const int16_t *base, vuint8m4_t bindex, size_t v
   return vloxei8_v_i16m8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vloxei16_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i16.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -296,7 +296,7 @@ vint16mf4_t test_vloxei16_v_i16mf4(const int16_t *base, vuint16mf4_t bindex, siz
   return vloxei16_v_i16mf4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i16mf2(
+// CHECK-RV64-LABEL: @test_vloxei16_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i16.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -306,7 +306,7 @@ vint16mf2_t test_vloxei16_v_i16mf2(const int16_t *base, vuint16mf2_t bindex, siz
   return vloxei16_v_i16mf2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i16m1(
+// CHECK-RV64-LABEL: @test_vloxei16_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i16.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -316,7 +316,7 @@ vint16m1_t test_vloxei16_v_i16m1(const int16_t *base, vuint16m1_t bindex, size_t
   return vloxei16_v_i16m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i16m2(
+// CHECK-RV64-LABEL: @test_vloxei16_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i16.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -326,7 +326,7 @@ vint16m2_t test_vloxei16_v_i16m2(const int16_t *base, vuint16m2_t bindex, size_t
   return vloxei16_v_i16m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i16m4(
+// CHECK-RV64-LABEL: @test_vloxei16_v_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i16.i64(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -336,7 +336,7 @@ vint16m4_t test_vloxei16_v_i16m4(const int16_t *base, vuint16m4_t bindex, size_t
   return vloxei16_v_i16m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i16m8(
+// CHECK-RV64-LABEL: @test_vloxei16_v_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i16.i64(<vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -346,7 +346,7 @@ vint16m8_t test_vloxei16_v_i16m8(const int16_t *base, vuint16m8_t bindex, size_t
   return vloxei16_v_i16m8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vloxei32_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i32.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -356,7 +356,7 @@ vint16mf4_t test_vloxei32_v_i16mf4(const int16_t *base, vuint32mf2_t bindex, siz
   return vloxei32_v_i16mf4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i16mf2(
+// CHECK-RV64-LABEL: @test_vloxei32_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i32.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -366,7 +366,7 @@ vint16mf2_t test_vloxei32_v_i16mf2(const int16_t *base, vuint32m1_t bindex, size
   return vloxei32_v_i16mf2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i16m1(
+// CHECK-RV64-LABEL: @test_vloxei32_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i32.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -376,7 +376,7 @@ vint16m1_t test_vloxei32_v_i16m1(const int16_t *base, vuint32m2_t bindex, size_t
   return vloxei32_v_i16m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i16m2(
+// CHECK-RV64-LABEL: @test_vloxei32_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i32.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -386,7 +386,7 @@ vint16m2_t test_vloxei32_v_i16m2(const int16_t *base, vuint32m4_t bindex, size_t
   return vloxei32_v_i16m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i16m4(
+// CHECK-RV64-LABEL: @test_vloxei32_v_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i32.i64(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -396,7 +396,7 @@ vint16m4_t test_vloxei32_v_i16m4(const int16_t *base, vuint32m8_t bindex, size_t
   return vloxei32_v_i16m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vloxei64_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i64.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -406,7 +406,7 @@ vint16mf4_t test_vloxei64_v_i16mf4(const int16_t *base, vuint64m1_t bindex, size
   return vloxei64_v_i16mf4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i16mf2(
+// CHECK-RV64-LABEL: @test_vloxei64_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i64.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -416,7 +416,7 @@ vint16mf2_t test_vloxei64_v_i16mf2(const int16_t *base, vuint64m2_t bindex, size
   return vloxei64_v_i16mf2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i16m1(
+// CHECK-RV64-LABEL: @test_vloxei64_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i64.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -426,7 +426,7 @@ vint16m1_t test_vloxei64_v_i16m1(const int16_t *base, vuint64m4_t bindex, size_t
   return vloxei64_v_i16m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i16m2(
+// CHECK-RV64-LABEL: @test_vloxei64_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i64.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -436,7 +436,7 @@ vint16m2_t test_vloxei64_v_i16m2(const int16_t *base, vuint64m8_t bindex, size_t
   return vloxei64_v_i16m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i32mf2(
+// CHECK-RV64-LABEL: @test_vloxei8_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -446,7 +446,7 @@ vint32mf2_t test_vloxei8_v_i32mf2(const int32_t *base, vuint8mf8_t bindex, size_
   return vloxei8_v_i32mf2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i32m1(
+// CHECK-RV64-LABEL: @test_vloxei8_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -456,7 +456,7 @@ vint32m1_t test_vloxei8_v_i32m1(const int32_t *base, vuint8mf4_t bindex, size_t
   return vloxei8_v_i32m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i32m2(
+// CHECK-RV64-LABEL: @test_vloxei8_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -466,7 +466,7 @@ vint32m2_t test_vloxei8_v_i32m2(const int32_t *base, vuint8mf2_t bindex, size_t
   return vloxei8_v_i32m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i32m4(
+// CHECK-RV64-LABEL: @test_vloxei8_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -476,7 +476,7 @@ vint32m4_t test_vloxei8_v_i32m4(const int32_t *base, vuint8m1_t bindex, size_t v
   return vloxei8_v_i32m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i32m8(
+// CHECK-RV64-LABEL: @test_vloxei8_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -486,7 +486,7 @@ vint32m8_t test_vloxei8_v_i32m8(const int32_t *base, vuint8m2_t bindex, size_t v
   return vloxei8_v_i32m8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i32mf2(
+// CHECK-RV64-LABEL: @test_vloxei16_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -496,7 +496,7 @@ vint32mf2_t test_vloxei16_v_i32mf2(const int32_t *base, vuint16mf4_t bindex, siz
   return vloxei16_v_i32mf2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i32m1(
+// CHECK-RV64-LABEL: @test_vloxei16_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -506,7 +506,7 @@ vint32m1_t test_vloxei16_v_i32m1(const int32_t *base, vuint16mf2_t bindex, size_
   return vloxei16_v_i32m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i32m2(
+// CHECK-RV64-LABEL: @test_vloxei16_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -516,7 +516,7 @@ vint32m2_t test_vloxei16_v_i32m2(const int32_t *base, vuint16m1_t bindex, size_t
   return vloxei16_v_i32m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i32m4(
+// CHECK-RV64-LABEL: @test_vloxei16_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -526,7 +526,7 @@ vint32m4_t test_vloxei16_v_i32m4(const int32_t *base, vuint16m2_t bindex, size_t
   return vloxei16_v_i32m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i32m8(
+// CHECK-RV64-LABEL: @test_vloxei16_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -536,7 +536,7 @@ vint32m8_t test_vloxei16_v_i32m8(const int32_t *base, vuint16m4_t bindex, size_t
   return vloxei16_v_i32m8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i32mf2(
+// CHECK-RV64-LABEL: @test_vloxei32_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -546,7 +546,7 @@ vint32mf2_t test_vloxei32_v_i32mf2(const int32_t *base, vuint32mf2_t bindex, siz
   return vloxei32_v_i32mf2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i32m1(
+// CHECK-RV64-LABEL: @test_vloxei32_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -556,7 +556,7 @@ vint32m1_t test_vloxei32_v_i32m1(const int32_t *base, vuint32m1_t bindex, size_t
   return vloxei32_v_i32m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i32m2(
+// CHECK-RV64-LABEL: @test_vloxei32_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -566,7 +566,7 @@ vint32m2_t test_vloxei32_v_i32m2(const int32_t *base, vuint32m2_t bindex, size_t
   return vloxei32_v_i32m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i32m4(
+// CHECK-RV64-LABEL: @test_vloxei32_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -576,7 +576,7 @@ vint32m4_t test_vloxei32_v_i32m4(const int32_t *base, vuint32m4_t bindex, size_t
   return vloxei32_v_i32m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i32m8(
+// CHECK-RV64-LABEL: @test_vloxei32_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -586,7 +586,7 @@ vint32m8_t test_vloxei32_v_i32m8(const int32_t *base, vuint32m8_t bindex, size_t
   return vloxei32_v_i32m8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i32mf2(
+// CHECK-RV64-LABEL: @test_vloxei64_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -596,7 +596,7 @@ vint32mf2_t test_vloxei64_v_i32mf2(const int32_t *base, vuint64m1_t bindex, size
   return vloxei64_v_i32mf2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i32m1(
+// CHECK-RV64-LABEL: @test_vloxei64_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -606,7 +606,7 @@ vint32m1_t test_vloxei64_v_i32m1(const int32_t *base, vuint64m2_t bindex, size_t
   return vloxei64_v_i32m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i32m2(
+// CHECK-RV64-LABEL: @test_vloxei64_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -616,7 +616,7 @@ vint32m2_t test_vloxei64_v_i32m2(const int32_t *base, vuint64m4_t bindex, size_t
   return vloxei64_v_i32m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i32m4(
+// CHECK-RV64-LABEL: @test_vloxei64_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -626,7 +626,7 @@ vint32m4_t test_vloxei64_v_i32m4(const int32_t *base, vuint64m8_t bindex, size_t
   return vloxei64_v_i32m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i64m1(
+// CHECK-RV64-LABEL: @test_vloxei8_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -636,7 +636,7 @@ vint64m1_t test_vloxei8_v_i64m1(const int64_t *base, vuint8mf8_t bindex, size_t
   return vloxei8_v_i64m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i64m2(
+// CHECK-RV64-LABEL: @test_vloxei8_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -646,7 +646,7 @@ vint64m2_t test_vloxei8_v_i64m2(const int64_t *base, vuint8mf4_t bindex, size_t
   return vloxei8_v_i64m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i64m4(
+// CHECK-RV64-LABEL: @test_vloxei8_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -656,7 +656,7 @@ vint64m4_t test_vloxei8_v_i64m4(const int64_t *base, vuint8mf2_t bindex, size_t
   return vloxei8_v_i64m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i64m8(
+// CHECK-RV64-LABEL: @test_vloxei8_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -666,7 +666,7 @@ vint64m8_t test_vloxei8_v_i64m8(const int64_t *base, vuint8m1_t bindex, size_t v
   return vloxei8_v_i64m8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i64m1(
+// CHECK-RV64-LABEL: @test_vloxei16_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -676,7 +676,7 @@ vint64m1_t test_vloxei16_v_i64m1(const int64_t *base, vuint16mf4_t bindex, size_
   return vloxei16_v_i64m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i64m2(
+// CHECK-RV64-LABEL: @test_vloxei16_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -686,7 +686,7 @@ vint64m2_t test_vloxei16_v_i64m2(const int64_t *base, vuint16mf2_t bindex, size_
   return vloxei16_v_i64m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i64m4(
+// CHECK-RV64-LABEL: @test_vloxei16_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -696,7 +696,7 @@ vint64m4_t test_vloxei16_v_i64m4(const int64_t *base, vuint16m1_t bindex, size_t
   return vloxei16_v_i64m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i64m8(
+// CHECK-RV64-LABEL: @test_vloxei16_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -706,7 +706,7 @@ vint64m8_t test_vloxei16_v_i64m8(const int64_t *base, vuint16m2_t bindex, size_t
   return vloxei16_v_i64m8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i64m1(
+// CHECK-RV64-LABEL: @test_vloxei32_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -716,7 +716,7 @@ vint64m1_t test_vloxei32_v_i64m1(const int64_t *base, vuint32mf2_t bindex, size_
   return vloxei32_v_i64m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i64m2(
+// CHECK-RV64-LABEL: @test_vloxei32_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -726,7 +726,7 @@ vint64m2_t test_vloxei32_v_i64m2(const int64_t *base, vuint32m1_t bindex, size_t
   return vloxei32_v_i64m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i64m4(
+// CHECK-RV64-LABEL: @test_vloxei32_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -736,7 +736,7 @@ vint64m4_t test_vloxei32_v_i64m4(const int64_t *base, vuint32m2_t bindex, size_t
   return vloxei32_v_i64m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i64m8(
+// CHECK-RV64-LABEL: @test_vloxei32_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -746,7 +746,7 @@ vint64m8_t test_vloxei32_v_i64m8(const int64_t *base, vuint32m4_t bindex, size_t
   return vloxei32_v_i64m8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i64m1(
+// CHECK-RV64-LABEL: @test_vloxei64_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -756,7 +756,7 @@ vint64m1_t test_vloxei64_v_i64m1(const int64_t *base, vuint64m1_t bindex, size_t
   return vloxei64_v_i64m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i64m2(
+// CHECK-RV64-LABEL: @test_vloxei64_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -766,7 +766,7 @@ vint64m2_t test_vloxei64_v_i64m2(const int64_t *base, vuint64m2_t bindex, size_t
   return vloxei64_v_i64m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i64m4(
+// CHECK-RV64-LABEL: @test_vloxei64_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -776,7 +776,7 @@ vint64m4_t test_vloxei64_v_i64m4(const int64_t *base, vuint64m4_t bindex, size_t
   return vloxei64_v_i64m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i64m8(
+// CHECK-RV64-LABEL: @test_vloxei64_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -786,7 +786,7 @@ vint64m8_t test_vloxei64_v_i64m8(const int64_t *base, vuint64m8_t bindex, size_t
   return vloxei64_v_i64m8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u8mf8(
+// CHECK-RV64-LABEL: @test_vloxei8_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i8.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -796,7 +796,7 @@ vuint8mf8_t test_vloxei8_v_u8mf8(const uint8_t *base, vuint8mf8_t bindex, size_t
   return vloxei8_v_u8mf8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u8mf4(
+// CHECK-RV64-LABEL: @test_vloxei8_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i8.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -806,7 +806,7 @@ vuint8mf4_t test_vloxei8_v_u8mf4(const uint8_t *base, vuint8mf4_t bindex, size_t
   return vloxei8_v_u8mf4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u8mf2(
+// CHECK-RV64-LABEL: @test_vloxei8_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i8.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -816,7 +816,7 @@ vuint8mf2_t test_vloxei8_v_u8mf2(const uint8_t *base, vuint8mf2_t bindex, size_t
   return vloxei8_v_u8mf2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u8m1(
+// CHECK-RV64-LABEL: @test_vloxei8_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i8.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -826,7 +826,7 @@ vuint8m1_t test_vloxei8_v_u8m1(const uint8_t *base, vuint8m1_t bindex, size_t vl
   return vloxei8_v_u8m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u8m2(
+// CHECK-RV64-LABEL: @test_vloxei8_v_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i8.i64(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -836,7 +836,7 @@ vuint8m2_t test_vloxei8_v_u8m2(const uint8_t *base, vuint8m2_t bindex, size_t vl
   return vloxei8_v_u8m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u8m4(
+// CHECK-RV64-LABEL: @test_vloxei8_v_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i8.i64(<vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -846,7 +846,7 @@ vuint8m4_t test_vloxei8_v_u8m4(const uint8_t *base, vuint8m4_t bindex, size_t vl
   return vloxei8_v_u8m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u8m8(
+// CHECK-RV64-LABEL: @test_vloxei8_v_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vloxei.nxv64i8.nxv64i8.i64(<vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -856,7 +856,7 @@ vuint8m8_t test_vloxei8_v_u8m8(const uint8_t *base, vuint8m8_t bindex, size_t vl
   return vloxei8_v_u8m8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u8mf8(
+// CHECK-RV64-LABEL: @test_vloxei16_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i16.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -866,7 +866,7 @@ vuint8mf8_t test_vloxei16_v_u8mf8(const uint8_t *base, vuint16mf4_t bindex, size
   return vloxei16_v_u8mf8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u8mf4(
+// CHECK-RV64-LABEL: @test_vloxei16_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i16.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -876,7 +876,7 @@ vuint8mf4_t test_vloxei16_v_u8mf4(const uint8_t *base, vuint16mf2_t bindex, size
   return vloxei16_v_u8mf4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u8mf2(
+// CHECK-RV64-LABEL: @test_vloxei16_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i16.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -886,7 +886,7 @@ vuint8mf2_t test_vloxei16_v_u8mf2(const uint8_t *base, vuint16m1_t bindex, size_
   return vloxei16_v_u8mf2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u8m1(
+// CHECK-RV64-LABEL: @test_vloxei16_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i16.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -896,7 +896,7 @@ vuint8m1_t test_vloxei16_v_u8m1(const uint8_t *base, vuint16m2_t bindex, size_t
   return vloxei16_v_u8m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u8m2(
+// CHECK-RV64-LABEL: @test_vloxei16_v_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i16.i64(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -906,7 +906,7 @@ vuint8m2_t test_vloxei16_v_u8m2(const uint8_t *base, vuint16m4_t bindex, size_t
   return vloxei16_v_u8m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u8m4(
+// CHECK-RV64-LABEL: @test_vloxei16_v_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i16.i64(<vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -916,7 +916,7 @@ vuint8m4_t test_vloxei16_v_u8m4(const uint8_t *base, vuint16m8_t bindex, size_t
   return vloxei16_v_u8m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u8mf8(
+// CHECK-RV64-LABEL: @test_vloxei32_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i32.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -926,7 +926,7 @@ vuint8mf8_t test_vloxei32_v_u8mf8(const uint8_t *base, vuint32mf2_t bindex, size
   return vloxei32_v_u8mf8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u8mf4(
+// CHECK-RV64-LABEL: @test_vloxei32_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i32.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -936,7 +936,7 @@ vuint8mf4_t test_vloxei32_v_u8mf4(const uint8_t *base, vuint32m1_t bindex, size_
   return vloxei32_v_u8mf4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u8mf2(
+// CHECK-RV64-LABEL: @test_vloxei32_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i32.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -946,7 +946,7 @@ vuint8mf2_t test_vloxei32_v_u8mf2(const uint8_t *base, vuint32m2_t bindex, size_
   return vloxei32_v_u8mf2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u8m1(
+// CHECK-RV64-LABEL: @test_vloxei32_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i32.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -956,7 +956,7 @@ vuint8m1_t test_vloxei32_v_u8m1(const uint8_t *base, vuint32m4_t bindex, size_t
   return vloxei32_v_u8m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u8m2(
+// CHECK-RV64-LABEL: @test_vloxei32_v_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i32.i64(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -966,7 +966,7 @@ vuint8m2_t test_vloxei32_v_u8m2(const uint8_t *base, vuint32m8_t bindex, size_t
   return vloxei32_v_u8m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u8mf8(
+// CHECK-RV64-LABEL: @test_vloxei64_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i64.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -976,7 +976,7 @@ vuint8mf8_t test_vloxei64_v_u8mf8(const uint8_t *base, vuint64m1_t bindex, size_
   return vloxei64_v_u8mf8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u8mf4(
+// CHECK-RV64-LABEL: @test_vloxei64_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i64.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -986,7 +986,7 @@ vuint8mf4_t test_vloxei64_v_u8mf4(const uint8_t *base, vuint64m2_t bindex, size_
   return vloxei64_v_u8mf4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u8mf2(
+// CHECK-RV64-LABEL: @test_vloxei64_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i64.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -996,7 +996,7 @@ vuint8mf2_t test_vloxei64_v_u8mf2(const uint8_t *base, vuint64m4_t bindex, size_
   return vloxei64_v_u8mf2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u8m1(
+// CHECK-RV64-LABEL: @test_vloxei64_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i64.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1006,7 +1006,7 @@ vuint8m1_t test_vloxei64_v_u8m1(const uint8_t *base, vuint64m8_t bindex, size_t
   return vloxei64_v_u8m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u16mf4(
+// CHECK-RV64-LABEL: @test_vloxei8_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i8.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1016,7 +1016,7 @@ vuint16mf4_t test_vloxei8_v_u16mf4(const uint16_t *base, vuint8mf8_t bindex, siz
   return vloxei8_v_u16mf4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u16mf2(
+// CHECK-RV64-LABEL: @test_vloxei8_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i8.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1026,7 +1026,7 @@ vuint16mf2_t test_vloxei8_v_u16mf2(const uint16_t *base, vuint8mf4_t bindex, siz
   return vloxei8_v_u16mf2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u16m1(
+// CHECK-RV64-LABEL: @test_vloxei8_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i8.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1036,7 +1036,7 @@ vuint16m1_t test_vloxei8_v_u16m1(const uint16_t *base, vuint8mf2_t bindex, size_
   return vloxei8_v_u16m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u16m2(
+// CHECK-RV64-LABEL: @test_vloxei8_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i8.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1046,7 +1046,7 @@ vuint16m2_t test_vloxei8_v_u16m2(const uint16_t *base, vuint8m1_t bindex, size_t
   return vloxei8_v_u16m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u16m4(
+// CHECK-RV64-LABEL: @test_vloxei8_v_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i8.i64(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1056,7 +1056,7 @@ vuint16m4_t test_vloxei8_v_u16m4(const uint16_t *base, vuint8m2_t bindex, size_t
   return vloxei8_v_u16m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u16m8(
+// CHECK-RV64-LABEL: @test_vloxei8_v_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i8.i64(<vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1066,7 +1066,7 @@ vuint16m8_t test_vloxei8_v_u16m8(const uint16_t *base, vuint8m4_t bindex, size_t
   return vloxei8_v_u16m8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u16mf4(
+// CHECK-RV64-LABEL: @test_vloxei16_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i16.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1076,7 +1076,7 @@ vuint16mf4_t test_vloxei16_v_u16mf4(const uint16_t *base, vuint16mf4_t bindex, s
   return vloxei16_v_u16mf4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u16mf2(
+// CHECK-RV64-LABEL: @test_vloxei16_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i16.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1086,7 +1086,7 @@ vuint16mf2_t test_vloxei16_v_u16mf2(const uint16_t *base, vuint16mf2_t bindex, s
   return vloxei16_v_u16mf2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u16m1(
+// CHECK-RV64-LABEL: @test_vloxei16_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i16.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1096,7 +1096,7 @@ vuint16m1_t test_vloxei16_v_u16m1(const uint16_t *base, vuint16m1_t bindex, size
   return vloxei16_v_u16m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u16m2(
+// CHECK-RV64-LABEL: @test_vloxei16_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i16.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1106,7 +1106,7 @@ vuint16m2_t test_vloxei16_v_u16m2(const uint16_t *base, vuint16m2_t bindex, size
   return vloxei16_v_u16m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u16m4(
+// CHECK-RV64-LABEL: @test_vloxei16_v_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i16.i64(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1116,7 +1116,7 @@ vuint16m4_t test_vloxei16_v_u16m4(const uint16_t *base, vuint16m4_t bindex, size
   return vloxei16_v_u16m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u16m8(
+// CHECK-RV64-LABEL: @test_vloxei16_v_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i16.i64(<vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1126,7 +1126,7 @@ vuint16m8_t test_vloxei16_v_u16m8(const uint16_t *base, vuint16m8_t bindex, size
   return vloxei16_v_u16m8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u16mf4(
+// CHECK-RV64-LABEL: @test_vloxei32_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i32.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1136,7 +1136,7 @@ vuint16mf4_t test_vloxei32_v_u16mf4(const uint16_t *base, vuint32mf2_t bindex, s
   return vloxei32_v_u16mf4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u16mf2(
+// CHECK-RV64-LABEL: @test_vloxei32_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i32.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1146,7 +1146,7 @@ vuint16mf2_t test_vloxei32_v_u16mf2(const uint16_t *base, vuint32m1_t bindex, si
   return vloxei32_v_u16mf2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u16m1(
+// CHECK-RV64-LABEL: @test_vloxei32_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i32.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1156,7 +1156,7 @@ vuint16m1_t test_vloxei32_v_u16m1(const uint16_t *base, vuint32m2_t bindex, size
   return vloxei32_v_u16m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u16m2(
+// CHECK-RV64-LABEL: @test_vloxei32_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i32.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1166,7 +1166,7 @@ vuint16m2_t test_vloxei32_v_u16m2(const uint16_t *base, vuint32m4_t bindex, size
   return vloxei32_v_u16m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u16m4(
+// CHECK-RV64-LABEL: @test_vloxei32_v_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i32.i64(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1176,7 +1176,7 @@ vuint16m4_t test_vloxei32_v_u16m4(const uint16_t *base, vuint32m8_t bindex, size
   return vloxei32_v_u16m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u16mf4(
+// CHECK-RV64-LABEL: @test_vloxei64_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i64.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1186,7 +1186,7 @@ vuint16mf4_t test_vloxei64_v_u16mf4(const uint16_t *base, vuint64m1_t bindex, si
   return vloxei64_v_u16mf4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u16mf2(
+// CHECK-RV64-LABEL: @test_vloxei64_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i64.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1196,7 +1196,7 @@ vuint16mf2_t test_vloxei64_v_u16mf2(const uint16_t *base, vuint64m2_t bindex, si
   return vloxei64_v_u16mf2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u16m1(
+// CHECK-RV64-LABEL: @test_vloxei64_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i64.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1206,7 +1206,7 @@ vuint16m1_t test_vloxei64_v_u16m1(const uint16_t *base, vuint64m4_t bindex, size
   return vloxei64_v_u16m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u16m2(
+// CHECK-RV64-LABEL: @test_vloxei64_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i64.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1216,7 +1216,7 @@ vuint16m2_t test_vloxei64_v_u16m2(const uint16_t *base, vuint64m8_t bindex, size
   return vloxei64_v_u16m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vloxei8_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1226,7 +1226,7 @@ vuint32mf2_t test_vloxei8_v_u32mf2(const uint32_t *base, vuint8mf8_t bindex, siz
   return vloxei8_v_u32mf2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u32m1(
+// CHECK-RV64-LABEL: @test_vloxei8_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1236,7 +1236,7 @@ vuint32m1_t test_vloxei8_v_u32m1(const uint32_t *base, vuint8mf4_t bindex, size_
   return vloxei8_v_u32m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u32m2(
+// CHECK-RV64-LABEL: @test_vloxei8_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1246,7 +1246,7 @@ vuint32m2_t test_vloxei8_v_u32m2(const uint32_t *base, vuint8mf2_t bindex, size_
   return vloxei8_v_u32m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u32m4(
+// CHECK-RV64-LABEL: @test_vloxei8_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1256,7 +1256,7 @@ vuint32m4_t test_vloxei8_v_u32m4(const uint32_t *base, vuint8m1_t bindex, size_t
   return vloxei8_v_u32m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u32m8(
+// CHECK-RV64-LABEL: @test_vloxei8_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1266,7 +1266,7 @@ vuint32m8_t test_vloxei8_v_u32m8(const uint32_t *base, vuint8m2_t bindex, size_t
   return vloxei8_v_u32m8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vloxei16_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1276,7 +1276,7 @@ vuint32mf2_t test_vloxei16_v_u32mf2(const uint32_t *base, vuint16mf4_t bindex, s
   return vloxei16_v_u32mf2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u32m1(
+// CHECK-RV64-LABEL: @test_vloxei16_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1286,7 +1286,7 @@ vuint32m1_t test_vloxei16_v_u32m1(const uint32_t *base, vuint16mf2_t bindex, siz
   return vloxei16_v_u32m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u32m2(
+// CHECK-RV64-LABEL: @test_vloxei16_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1296,7 +1296,7 @@ vuint32m2_t test_vloxei16_v_u32m2(const uint32_t *base, vuint16m1_t bindex, size
   return vloxei16_v_u32m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u32m4(
+// CHECK-RV64-LABEL: @test_vloxei16_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1306,7 +1306,7 @@ vuint32m4_t test_vloxei16_v_u32m4(const uint32_t *base, vuint16m2_t bindex, size
   return vloxei16_v_u32m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u32m8(
+// CHECK-RV64-LABEL: @test_vloxei16_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1316,7 +1316,7 @@ vuint32m8_t test_vloxei16_v_u32m8(const uint32_t *base, vuint16m4_t bindex, size
   return vloxei16_v_u32m8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vloxei32_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1326,7 +1326,7 @@ vuint32mf2_t test_vloxei32_v_u32mf2(const uint32_t *base, vuint32mf2_t bindex, s
   return vloxei32_v_u32mf2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u32m1(
+// CHECK-RV64-LABEL: @test_vloxei32_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1336,7 +1336,7 @@ vuint32m1_t test_vloxei32_v_u32m1(const uint32_t *base, vuint32m1_t bindex, size
   return vloxei32_v_u32m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u32m2(
+// CHECK-RV64-LABEL: @test_vloxei32_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1346,7 +1346,7 @@ vuint32m2_t test_vloxei32_v_u32m2(const uint32_t *base, vuint32m2_t bindex, size
   return vloxei32_v_u32m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u32m4(
+// CHECK-RV64-LABEL: @test_vloxei32_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1356,7 +1356,7 @@ vuint32m4_t test_vloxei32_v_u32m4(const uint32_t *base, vuint32m4_t bindex, size
   return vloxei32_v_u32m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u32m8(
+// CHECK-RV64-LABEL: @test_vloxei32_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1366,7 +1366,7 @@ vuint32m8_t test_vloxei32_v_u32m8(const uint32_t *base, vuint32m8_t bindex, size
   return vloxei32_v_u32m8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vloxei64_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1376,7 +1376,7 @@ vuint32mf2_t test_vloxei64_v_u32mf2(const uint32_t *base, vuint64m1_t bindex, si
   return vloxei64_v_u32mf2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u32m1(
+// CHECK-RV64-LABEL: @test_vloxei64_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1386,7 +1386,7 @@ vuint32m1_t test_vloxei64_v_u32m1(const uint32_t *base, vuint64m2_t bindex, size
   return vloxei64_v_u32m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u32m2(
+// CHECK-RV64-LABEL: @test_vloxei64_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1396,7 +1396,7 @@ vuint32m2_t test_vloxei64_v_u32m2(const uint32_t *base, vuint64m4_t bindex, size
   return vloxei64_v_u32m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u32m4(
+// CHECK-RV64-LABEL: @test_vloxei64_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1406,7 +1406,7 @@ vuint32m4_t test_vloxei64_v_u32m4(const uint32_t *base, vuint64m8_t bindex, size
   return vloxei64_v_u32m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u64m1(
+// CHECK-RV64-LABEL: @test_vloxei8_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1416,7 +1416,7 @@ vuint64m1_t test_vloxei8_v_u64m1(const uint64_t *base, vuint8mf8_t bindex, size_
   return vloxei8_v_u64m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u64m2(
+// CHECK-RV64-LABEL: @test_vloxei8_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1426,7 +1426,7 @@ vuint64m2_t test_vloxei8_v_u64m2(const uint64_t *base, vuint8mf4_t bindex, size_
   return vloxei8_v_u64m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u64m4(
+// CHECK-RV64-LABEL: @test_vloxei8_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1436,7 +1436,7 @@ vuint64m4_t test_vloxei8_v_u64m4(const uint64_t *base, vuint8mf2_t bindex, size_
   return vloxei8_v_u64m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u64m8(
+// CHECK-RV64-LABEL: @test_vloxei8_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1446,7 +1446,7 @@ vuint64m8_t test_vloxei8_v_u64m8(const uint64_t *base, vuint8m1_t bindex, size_t
   return vloxei8_v_u64m8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u64m1(
+// CHECK-RV64-LABEL: @test_vloxei16_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1456,7 +1456,7 @@ vuint64m1_t test_vloxei16_v_u64m1(const uint64_t *base, vuint16mf4_t bindex, siz
   return vloxei16_v_u64m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u64m2(
+// CHECK-RV64-LABEL: @test_vloxei16_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1466,7 +1466,7 @@ vuint64m2_t test_vloxei16_v_u64m2(const uint64_t *base, vuint16mf2_t bindex, siz
   return vloxei16_v_u64m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u64m4(
+// CHECK-RV64-LABEL: @test_vloxei16_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1476,7 +1476,7 @@ vuint64m4_t test_vloxei16_v_u64m4(const uint64_t *base, vuint16m1_t bindex, size
   return vloxei16_v_u64m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u64m8(
+// CHECK-RV64-LABEL: @test_vloxei16_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1486,7 +1486,7 @@ vuint64m8_t test_vloxei16_v_u64m8(const uint64_t *base, vuint16m2_t bindex, size
   return vloxei16_v_u64m8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u64m1(
+// CHECK-RV64-LABEL: @test_vloxei32_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1496,7 +1496,7 @@ vuint64m1_t test_vloxei32_v_u64m1(const uint64_t *base, vuint32mf2_t bindex, siz
   return vloxei32_v_u64m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u64m2(
+// CHECK-RV64-LABEL: @test_vloxei32_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1506,7 +1506,7 @@ vuint64m2_t test_vloxei32_v_u64m2(const uint64_t *base, vuint32m1_t bindex, size
   return vloxei32_v_u64m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u64m4(
+// CHECK-RV64-LABEL: @test_vloxei32_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1516,7 +1516,7 @@ vuint64m4_t test_vloxei32_v_u64m4(const uint64_t *base, vuint32m2_t bindex, size
   return vloxei32_v_u64m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u64m8(
+// CHECK-RV64-LABEL: @test_vloxei32_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1526,7 +1526,7 @@ vuint64m8_t test_vloxei32_v_u64m8(const uint64_t *base, vuint32m4_t bindex, size
   return vloxei32_v_u64m8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u64m1(
+// CHECK-RV64-LABEL: @test_vloxei64_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1536,7 +1536,7 @@ vuint64m1_t test_vloxei64_v_u64m1(const uint64_t *base, vuint64m1_t bindex, size
   return vloxei64_v_u64m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u64m2(
+// CHECK-RV64-LABEL: @test_vloxei64_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1546,7 +1546,7 @@ vuint64m2_t test_vloxei64_v_u64m2(const uint64_t *base, vuint64m2_t bindex, size
   return vloxei64_v_u64m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u64m4(
+// CHECK-RV64-LABEL: @test_vloxei64_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1556,7 +1556,7 @@ vuint64m4_t test_vloxei64_v_u64m4(const uint64_t *base, vuint64m4_t bindex, size
   return vloxei64_v_u64m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u64m8(
+// CHECK-RV64-LABEL: @test_vloxei64_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1566,7 +1566,7 @@ vuint64m8_t test_vloxei64_v_u64m8(const uint64_t *base, vuint64m8_t bindex, size
   return vloxei64_v_u64m8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vloxei8_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i8.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1576,7 +1576,7 @@ vfloat32mf2_t test_vloxei8_v_f32mf2(const float *base, vuint8mf8_t bindex, size_
   return vloxei8_v_f32mf2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_f32m1(
+// CHECK-RV64-LABEL: @test_vloxei8_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i8.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1586,7 +1586,7 @@ vfloat32m1_t test_vloxei8_v_f32m1(const float *base, vuint8mf4_t bindex, size_t
   return vloxei8_v_f32m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_f32m2(
+// CHECK-RV64-LABEL: @test_vloxei8_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i8.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1596,7 +1596,7 @@ vfloat32m2_t test_vloxei8_v_f32m2(const float *base, vuint8mf2_t bindex, size_t
   return vloxei8_v_f32m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_f32m4(
+// CHECK-RV64-LABEL: @test_vloxei8_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i8.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1606,7 +1606,7 @@ vfloat32m4_t test_vloxei8_v_f32m4(const float *base, vuint8m1_t bindex, size_t v
   return vloxei8_v_f32m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_f32m8(
+// CHECK-RV64-LABEL: @test_vloxei8_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i8.i64(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1616,7 +1616,7 @@ vfloat32m8_t test_vloxei8_v_f32m8(const float *base, vuint8m2_t bindex, size_t v
   return vloxei8_v_f32m8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vloxei16_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i16.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1626,7 +1626,7 @@ vfloat32mf2_t test_vloxei16_v_f32mf2(const float *base, vuint16mf4_t bindex, siz
   return vloxei16_v_f32mf2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_f32m1(
+// CHECK-RV64-LABEL: @test_vloxei16_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i16.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1636,7 +1636,7 @@ vfloat32m1_t test_vloxei16_v_f32m1(const float *base, vuint16mf2_t bindex, size_
   return vloxei16_v_f32m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_f32m2(
+// CHECK-RV64-LABEL: @test_vloxei16_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i16.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1646,7 +1646,7 @@ vfloat32m2_t test_vloxei16_v_f32m2(const float *base, vuint16m1_t bindex, size_t
   return vloxei16_v_f32m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_f32m4(
+// CHECK-RV64-LABEL: @test_vloxei16_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i16.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1656,7 +1656,7 @@ vfloat32m4_t test_vloxei16_v_f32m4(const float *base, vuint16m2_t bindex, size_t
   return vloxei16_v_f32m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_f32m8(
+// CHECK-RV64-LABEL: @test_vloxei16_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i16.i64(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1666,7 +1666,7 @@ vfloat32m8_t test_vloxei16_v_f32m8(const float *base, vuint16m4_t bindex, size_t
   return vloxei16_v_f32m8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vloxei32_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i32.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1676,7 +1676,7 @@ vfloat32mf2_t test_vloxei32_v_f32mf2(const float *base, vuint32mf2_t bindex, siz
   return vloxei32_v_f32mf2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_f32m1(
+// CHECK-RV64-LABEL: @test_vloxei32_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i32.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1686,7 +1686,7 @@ vfloat32m1_t test_vloxei32_v_f32m1(const float *base, vuint32m1_t bindex, size_t
   return vloxei32_v_f32m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_f32m2(
+// CHECK-RV64-LABEL: @test_vloxei32_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i32.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1696,7 +1696,7 @@ vfloat32m2_t test_vloxei32_v_f32m2(const float *base, vuint32m2_t bindex, size_t
   return vloxei32_v_f32m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_f32m4(
+// CHECK-RV64-LABEL: @test_vloxei32_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i32.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1706,7 +1706,7 @@ vfloat32m4_t test_vloxei32_v_f32m4(const float *base, vuint32m4_t bindex, size_t
   return vloxei32_v_f32m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_f32m8(
+// CHECK-RV64-LABEL: @test_vloxei32_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i32.i64(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1716,7 +1716,7 @@ vfloat32m8_t test_vloxei32_v_f32m8(const float *base, vuint32m8_t bindex, size_t
   return vloxei32_v_f32m8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vloxei64_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i64.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1726,7 +1726,7 @@ vfloat32mf2_t test_vloxei64_v_f32mf2(const float *base, vuint64m1_t bindex, size
   return vloxei64_v_f32mf2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_f32m1(
+// CHECK-RV64-LABEL: @test_vloxei64_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i64.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1736,7 +1736,7 @@ vfloat32m1_t test_vloxei64_v_f32m1(const float *base, vuint64m2_t bindex, size_t
   return vloxei64_v_f32m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_f32m2(
+// CHECK-RV64-LABEL: @test_vloxei64_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i64.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1746,7 +1746,7 @@ vfloat32m2_t test_vloxei64_v_f32m2(const float *base, vuint64m4_t bindex, size_t
   return vloxei64_v_f32m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_f32m4(
+// CHECK-RV64-LABEL: @test_vloxei64_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i64.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1756,7 +1756,7 @@ vfloat32m4_t test_vloxei64_v_f32m4(const float *base, vuint64m8_t bindex, size_t
   return vloxei64_v_f32m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_f64m1(
+// CHECK-RV64-LABEL: @test_vloxei8_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i8.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1766,7 +1766,7 @@ vfloat64m1_t test_vloxei8_v_f64m1(const double *base, vuint8mf8_t bindex, size_t
   return vloxei8_v_f64m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_f64m2(
+// CHECK-RV64-LABEL: @test_vloxei8_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i8.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1776,7 +1776,7 @@ vfloat64m2_t test_vloxei8_v_f64m2(const double *base, vuint8mf4_t bindex, size_t
   return vloxei8_v_f64m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_f64m4(
+// CHECK-RV64-LABEL: @test_vloxei8_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i8.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1786,7 +1786,7 @@ vfloat64m4_t test_vloxei8_v_f64m4(const double *base, vuint8mf2_t bindex, size_t
   return vloxei8_v_f64m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_f64m8(
+// CHECK-RV64-LABEL: @test_vloxei8_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i8.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1796,7 +1796,7 @@ vfloat64m8_t test_vloxei8_v_f64m8(const double *base, vuint8m1_t bindex, size_t
   return vloxei8_v_f64m8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_f64m1(
+// CHECK-RV64-LABEL: @test_vloxei16_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i16.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1806,7 +1806,7 @@ vfloat64m1_t test_vloxei16_v_f64m1(const double *base, vuint16mf4_t bindex, size
   return vloxei16_v_f64m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_f64m2(
+// CHECK-RV64-LABEL: @test_vloxei16_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i16.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1816,7 +1816,7 @@ vfloat64m2_t test_vloxei16_v_f64m2(const double *base, vuint16mf2_t bindex, size
   return vloxei16_v_f64m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_f64m4(
+// CHECK-RV64-LABEL: @test_vloxei16_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i16.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1826,7 +1826,7 @@ vfloat64m4_t test_vloxei16_v_f64m4(const double *base, vuint16m1_t bindex, size_
   return vloxei16_v_f64m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_f64m8(
+// CHECK-RV64-LABEL: @test_vloxei16_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i16.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1836,7 +1836,7 @@ vfloat64m8_t test_vloxei16_v_f64m8(const double *base, vuint16m2_t bindex, size_
   return vloxei16_v_f64m8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_f64m1(
+// CHECK-RV64-LABEL: @test_vloxei32_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i32.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1846,7 +1846,7 @@ vfloat64m1_t test_vloxei32_v_f64m1(const double *base, vuint32mf2_t bindex, size
   return vloxei32_v_f64m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_f64m2(
+// CHECK-RV64-LABEL: @test_vloxei32_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i32.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1856,7 +1856,7 @@ vfloat64m2_t test_vloxei32_v_f64m2(const double *base, vuint32m1_t bindex, size_
   return vloxei32_v_f64m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_f64m4(
+// CHECK-RV64-LABEL: @test_vloxei32_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i32.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1866,7 +1866,7 @@ vfloat64m4_t test_vloxei32_v_f64m4(const double *base, vuint32m2_t bindex, size_
   return vloxei32_v_f64m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_f64m8(
+// CHECK-RV64-LABEL: @test_vloxei32_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i32.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1876,7 +1876,7 @@ vfloat64m8_t test_vloxei32_v_f64m8(const double *base, vuint32m4_t bindex, size_
   return vloxei32_v_f64m8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_f64m1(
+// CHECK-RV64-LABEL: @test_vloxei64_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i64.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1886,7 +1886,7 @@ vfloat64m1_t test_vloxei64_v_f64m1(const double *base, vuint64m1_t bindex, size_
   return vloxei64_v_f64m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_f64m2(
+// CHECK-RV64-LABEL: @test_vloxei64_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i64.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1896,7 +1896,7 @@ vfloat64m2_t test_vloxei64_v_f64m2(const double *base, vuint64m2_t bindex, size_
   return vloxei64_v_f64m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_f64m4(
+// CHECK-RV64-LABEL: @test_vloxei64_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i64.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1906,7 +1906,7 @@ vfloat64m4_t test_vloxei64_v_f64m4(const double *base, vuint64m4_t bindex, size_
   return vloxei64_v_f64m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_f64m8(
+// CHECK-RV64-LABEL: @test_vloxei64_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i64.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1916,7 +1916,7 @@ vfloat64m8_t test_vloxei64_v_f64m8(const double *base, vuint64m8_t bindex, size_
   return vloxei64_v_f64m8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxei8_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1926,7 +1926,7 @@ vint8mf8_t test_vloxei8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const in
   return vloxei8_v_i8mf8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i8mf4_m(
+// CHECK-RV64-LABEL: @test_vloxei8_v_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1936,7 +1936,7 @@ vint8mf4_t test_vloxei8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const in
   return vloxei8_v_i8mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i8mf2_m(
+// CHECK-RV64-LABEL: @test_vloxei8_v_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1946,7 +1946,7 @@ vint8mf2_t test_vloxei8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const in
   return vloxei8_v_i8mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vloxei8_v_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1956,7 +1956,7 @@ vint8m1_t test_vloxei8_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t
   return vloxei8_v_i8m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i8m2_m(
+// CHECK-RV64-LABEL: @test_vloxei8_v_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1966,7 +1966,7 @@ vint8m2_t test_vloxei8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t
   return vloxei8_v_i8m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i8m4_m(
+// CHECK-RV64-LABEL: @test_vloxei8_v_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1976,7 +1976,7 @@ vint8m4_t test_vloxei8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t
   return vloxei8_v_i8m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i8m8_m(
+// CHECK-RV64-LABEL: @test_vloxei8_v_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1986,7 +1986,7 @@ vint8m8_t test_vloxei8_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, const int8_t
   return vloxei8_v_i8m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxei16_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1996,7 +1996,7 @@ vint8mf8_t test_vloxei16_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const i
   return vloxei16_v_i8mf8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i8mf4_m(
+// CHECK-RV64-LABEL: @test_vloxei16_v_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2006,7 +2006,7 @@ vint8mf4_t test_vloxei16_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const i
   return vloxei16_v_i8mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i8mf2_m(
+// CHECK-RV64-LABEL: @test_vloxei16_v_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2016,7 +2016,7 @@ vint8mf2_t test_vloxei16_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const i
   return vloxei16_v_i8mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vloxei16_v_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2026,7 +2026,7 @@ vint8m1_t test_vloxei16_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_
   return vloxei16_v_i8m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i8m2_m(
+// CHECK-RV64-LABEL: @test_vloxei16_v_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2036,7 +2036,7 @@ vint8m2_t test_vloxei16_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_
   return vloxei16_v_i8m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i8m4_m(
+// CHECK-RV64-LABEL: @test_vloxei16_v_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2046,7 +2046,7 @@ vint8m4_t test_vloxei16_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_
   return vloxei16_v_i8m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxei32_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2056,7 +2056,7 @@ vint8mf8_t test_vloxei32_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const i
   return vloxei32_v_i8mf8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i8mf4_m(
+// CHECK-RV64-LABEL: @test_vloxei32_v_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2066,7 +2066,7 @@ vint8mf4_t test_vloxei32_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const i
   return vloxei32_v_i8mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i8mf2_m(
+// CHECK-RV64-LABEL: @test_vloxei32_v_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2076,7 +2076,7 @@ vint8mf2_t test_vloxei32_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const i
   return vloxei32_v_i8mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vloxei32_v_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2086,7 +2086,7 @@ vint8m1_t test_vloxei32_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_
   return vloxei32_v_i8m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i8m2_m(
+// CHECK-RV64-LABEL: @test_vloxei32_v_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2096,7 +2096,7 @@ vint8m2_t test_vloxei32_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_
   return vloxei32_v_i8m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxei64_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2106,7 +2106,7 @@ vint8mf8_t test_vloxei64_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const i
   return vloxei64_v_i8mf8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i8mf4_m(
+// CHECK-RV64-LABEL: @test_vloxei64_v_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2116,7 +2116,7 @@ vint8mf4_t test_vloxei64_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const i
   return vloxei64_v_i8mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i8mf2_m(
+// CHECK-RV64-LABEL: @test_vloxei64_v_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2126,7 +2126,7 @@ vint8mf2_t test_vloxei64_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const i
   return vloxei64_v_i8mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vloxei64_v_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2136,7 +2136,7 @@ vint8m1_t test_vloxei64_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_
   return vloxei64_v_i8m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vloxei8_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2146,7 +2146,7 @@ vint16mf4_t test_vloxei8_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const
   return vloxei8_v_i16mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vloxei8_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2156,7 +2156,7 @@ vint16mf2_t test_vloxei8_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const
   return vloxei8_v_i16mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vloxei8_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2166,7 +2166,7 @@ vint16m1_t test_vloxei8_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const in
   return vloxei8_v_i16m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i16m2_m(
+// CHECK-RV64-LABEL: @test_vloxei8_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2176,7 +2176,7 @@ vint16m2_t test_vloxei8_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int
   return vloxei8_v_i16m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i16m4_m(
+// CHECK-RV64-LABEL: @test_vloxei8_v_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2186,7 +2186,7 @@ vint16m4_t test_vloxei8_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int
   return vloxei8_v_i16m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i16m8_m(
+// CHECK-RV64-LABEL: @test_vloxei8_v_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2196,7 +2196,7 @@ vint16m8_t test_vloxei8_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int
   return vloxei8_v_i16m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vloxei16_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2206,7 +2206,7 @@ vint16mf4_t test_vloxei16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, cons
   return vloxei16_v_i16mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vloxei16_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2216,7 +2216,7 @@ vint16mf2_t test_vloxei16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, cons
   return vloxei16_v_i16mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vloxei16_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2226,7 +2226,7 @@ vint16m1_t test_vloxei16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const i
   return vloxei16_v_i16m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i16m2_m(
+// CHECK-RV64-LABEL: @test_vloxei16_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2236,7 +2236,7 @@ vint16m2_t test_vloxei16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const in
   return vloxei16_v_i16m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i16m4_m(
+// CHECK-RV64-LABEL: @test_vloxei16_v_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2246,7 +2246,7 @@ vint16m4_t test_vloxei16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const in
   return vloxei16_v_i16m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i16m8_m(
+// CHECK-RV64-LABEL: @test_vloxei16_v_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2256,7 +2256,7 @@ vint16m8_t test_vloxei16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const in
   return vloxei16_v_i16m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vloxei32_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2266,7 +2266,7 @@ vint16mf4_t test_vloxei32_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, cons
   return vloxei32_v_i16mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vloxei32_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2276,7 +2276,7 @@ vint16mf2_t test_vloxei32_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, cons
   return vloxei32_v_i16mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vloxei32_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2286,7 +2286,7 @@ vint16m1_t test_vloxei32_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const i
   return vloxei32_v_i16m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i16m2_m(
+// CHECK-RV64-LABEL: @test_vloxei32_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2296,7 +2296,7 @@ vint16m2_t test_vloxei32_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const in
   return vloxei32_v_i16m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i16m4_m(
+// CHECK-RV64-LABEL: @test_vloxei32_v_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2306,7 +2306,7 @@ vint16m4_t test_vloxei32_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const in
   return vloxei32_v_i16m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vloxei64_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2316,7 +2316,7 @@ vint16mf4_t test_vloxei64_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, cons
   return vloxei64_v_i16mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vloxei64_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2326,7 +2326,7 @@ vint16mf2_t test_vloxei64_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, cons
   return vloxei64_v_i16mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vloxei64_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2336,7 +2336,7 @@ vint16m1_t test_vloxei64_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const i
   return vloxei64_v_i16m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i16m2_m(
+// CHECK-RV64-LABEL: @test_vloxei64_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2346,7 +2346,7 @@ vint16m2_t test_vloxei64_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const in
   return vloxei64_v_i16m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxei8_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2356,7 +2356,7 @@ vint32mf2_t test_vloxei8_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const
   return vloxei8_v_i32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vloxei8_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2366,7 +2366,7 @@ vint32m1_t test_vloxei8_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const in
   return vloxei8_v_i32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i32m2_m(
+// CHECK-RV64-LABEL: @test_vloxei8_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2376,7 +2376,7 @@ vint32m2_t test_vloxei8_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const in
   return vloxei8_v_i32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i32m4_m(
+// CHECK-RV64-LABEL: @test_vloxei8_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2386,7 +2386,7 @@ vint32m4_t test_vloxei8_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int
   return vloxei8_v_i32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i32m8_m(
+// CHECK-RV64-LABEL: @test_vloxei8_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2396,7 +2396,7 @@ vint32m8_t test_vloxei8_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int
   return vloxei8_v_i32m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxei16_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2406,7 +2406,7 @@ vint32mf2_t test_vloxei16_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, cons
   return vloxei16_v_i32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vloxei16_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2416,7 +2416,7 @@ vint32m1_t test_vloxei16_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const i
   return vloxei16_v_i32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i32m2_m(
+// CHECK-RV64-LABEL: @test_vloxei16_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2426,7 +2426,7 @@ vint32m2_t test_vloxei16_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const i
   return vloxei16_v_i32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i32m4_m(
+// CHECK-RV64-LABEL: @test_vloxei16_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2436,7 +2436,7 @@ vint32m4_t test_vloxei16_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const in
   return vloxei16_v_i32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i32m8_m(
+// CHECK-RV64-LABEL: @test_vloxei16_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2446,7 +2446,7 @@ vint32m8_t test_vloxei16_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const in
   return vloxei16_v_i32m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxei32_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2456,7 +2456,7 @@ vint32mf2_t test_vloxei32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, cons
   return vloxei32_v_i32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vloxei32_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2466,7 +2466,7 @@ vint32m1_t test_vloxei32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const i
   return vloxei32_v_i32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i32m2_m(
+// CHECK-RV64-LABEL: @test_vloxei32_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2476,7 +2476,7 @@ vint32m2_t test_vloxei32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const i
   return vloxei32_v_i32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i32m4_m(
+// CHECK-RV64-LABEL: @test_vloxei32_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2486,7 +2486,7 @@ vint32m4_t test_vloxei32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const in
   return vloxei32_v_i32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i32m8_m(
+// CHECK-RV64-LABEL: @test_vloxei32_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2496,7 +2496,7 @@ vint32m8_t test_vloxei32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const in
   return vloxei32_v_i32m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxei64_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2506,7 +2506,7 @@ vint32mf2_t test_vloxei64_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, cons
   return vloxei64_v_i32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vloxei64_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2516,7 +2516,7 @@ vint32m1_t test_vloxei64_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const i
   return vloxei64_v_i32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i32m2_m(
+// CHECK-RV64-LABEL: @test_vloxei64_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2526,7 +2526,7 @@ vint32m2_t test_vloxei64_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const i
   return vloxei64_v_i32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i32m4_m(
+// CHECK-RV64-LABEL: @test_vloxei64_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2536,7 +2536,7 @@ vint32m4_t test_vloxei64_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const in
   return vloxei64_v_i32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vloxei8_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2546,7 +2546,7 @@ vint64m1_t test_vloxei8_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const in
   return vloxei8_v_i64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i64m2_m(
+// CHECK-RV64-LABEL: @test_vloxei8_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2556,7 +2556,7 @@ vint64m2_t test_vloxei8_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const in
   return vloxei8_v_i64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i64m4_m(
+// CHECK-RV64-LABEL: @test_vloxei8_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2566,7 +2566,7 @@ vint64m4_t test_vloxei8_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const in
   return vloxei8_v_i64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i64m8_m(
+// CHECK-RV64-LABEL: @test_vloxei8_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2576,7 +2576,7 @@ vint64m8_t test_vloxei8_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int
   return vloxei8_v_i64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vloxei16_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2586,7 +2586,7 @@ vint64m1_t test_vloxei16_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const i
   return vloxei16_v_i64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i64m2_m(
+// CHECK-RV64-LABEL: @test_vloxei16_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2596,7 +2596,7 @@ vint64m2_t test_vloxei16_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const i
   return vloxei16_v_i64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i64m4_m(
+// CHECK-RV64-LABEL: @test_vloxei16_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2606,7 +2606,7 @@ vint64m4_t test_vloxei16_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const i
   return vloxei16_v_i64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i64m8_m(
+// CHECK-RV64-LABEL: @test_vloxei16_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2616,7 +2616,7 @@ vint64m8_t test_vloxei16_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const in
   return vloxei16_v_i64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vloxei32_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2626,7 +2626,7 @@ vint64m1_t test_vloxei32_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const i
   return vloxei32_v_i64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i64m2_m(
+// CHECK-RV64-LABEL: @test_vloxei32_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2636,7 +2636,7 @@ vint64m2_t test_vloxei32_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const i
   return vloxei32_v_i64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i64m4_m(
+// CHECK-RV64-LABEL: @test_vloxei32_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2646,7 +2646,7 @@ vint64m4_t test_vloxei32_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const i
   return vloxei32_v_i64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i64m8_m(
+// CHECK-RV64-LABEL: @test_vloxei32_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2656,7 +2656,7 @@ vint64m8_t test_vloxei32_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const in
   return vloxei32_v_i64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vloxei64_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2666,7 +2666,7 @@ vint64m1_t test_vloxei64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const i
   return vloxei64_v_i64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i64m2_m(
+// CHECK-RV64-LABEL: @test_vloxei64_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2676,7 +2676,7 @@ vint64m2_t test_vloxei64_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const i
   return vloxei64_v_i64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i64m4_m(
+// CHECK-RV64-LABEL: @test_vloxei64_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2686,7 +2686,7 @@ vint64m4_t test_vloxei64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const i
   return vloxei64_v_i64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i64m8_m(
+// CHECK-RV64-LABEL: @test_vloxei64_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2696,7 +2696,7 @@ vint64m8_t test_vloxei64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const in
   return vloxei64_v_i64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxei8_v_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2706,7 +2706,7 @@ vuint8mf8_t test_vloxei8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const
   return vloxei8_v_u8mf8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vloxei8_v_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2716,7 +2716,7 @@ vuint8mf4_t test_vloxei8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const
   return vloxei8_v_u8mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vloxei8_v_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2726,7 +2726,7 @@ vuint8mf2_t test_vloxei8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const
   return vloxei8_v_u8mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vloxei8_v_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2736,7 +2736,7 @@ vuint8m1_t test_vloxei8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint
   return vloxei8_v_u8m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u8m2_m(
+// CHECK-RV64-LABEL: @test_vloxei8_v_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2746,7 +2746,7 @@ vuint8m2_t test_vloxei8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint
   return vloxei8_v_u8m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u8m4_m(
+// CHECK-RV64-LABEL: @test_vloxei8_v_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2756,7 +2756,7 @@ vuint8m4_t test_vloxei8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint
   return vloxei8_v_u8m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u8m8_m(
+// CHECK-RV64-LABEL: @test_vloxei8_v_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2766,7 +2766,7 @@ vuint8m8_t test_vloxei8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, const uint
   return vloxei8_v_u8m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxei16_v_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2776,7 +2776,7 @@ vuint8mf8_t test_vloxei16_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const
   return vloxei16_v_u8mf8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vloxei16_v_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2786,7 +2786,7 @@ vuint8mf4_t test_vloxei16_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const
   return vloxei16_v_u8mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vloxei16_v_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2796,7 +2796,7 @@ vuint8mf2_t test_vloxei16_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const
   return vloxei16_v_u8mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vloxei16_v_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2806,7 +2806,7 @@ vuint8m1_t test_vloxei16_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uin
   return vloxei16_v_u8m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u8m2_m(
+// CHECK-RV64-LABEL: @test_vloxei16_v_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2816,7 +2816,7 @@ vuint8m2_t test_vloxei16_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uin
   return vloxei16_v_u8m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u8m4_m(
+// CHECK-RV64-LABEL: @test_vloxei16_v_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2826,7 +2826,7 @@ vuint8m4_t test_vloxei16_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uin
   return vloxei16_v_u8m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxei32_v_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2836,7 +2836,7 @@ vuint8mf8_t test_vloxei32_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const
   return vloxei32_v_u8mf8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vloxei32_v_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2846,7 +2846,7 @@ vuint8mf4_t test_vloxei32_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const
   return vloxei32_v_u8mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vloxei32_v_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2856,7 +2856,7 @@ vuint8mf2_t test_vloxei32_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const
   return vloxei32_v_u8mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vloxei32_v_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2866,7 +2866,7 @@ vuint8m1_t test_vloxei32_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uin
   return vloxei32_v_u8m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u8m2_m(
+// CHECK-RV64-LABEL: @test_vloxei32_v_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2876,7 +2876,7 @@ vuint8m2_t test_vloxei32_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uin
   return vloxei32_v_u8m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxei64_v_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2886,7 +2886,7 @@ vuint8mf8_t test_vloxei64_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const
   return vloxei64_v_u8mf8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vloxei64_v_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2896,7 +2896,7 @@ vuint8mf4_t test_vloxei64_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const
   return vloxei64_v_u8mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vloxei64_v_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2906,7 +2906,7 @@ vuint8mf2_t test_vloxei64_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const
   return vloxei64_v_u8mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vloxei64_v_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2916,7 +2916,7 @@ vuint8m1_t test_vloxei64_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uin
   return vloxei64_v_u8m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u16mf4_m(
+// CHECK-RV64-LABEL: @test_vloxei8_v_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2926,7 +2926,7 @@ vuint16mf4_t test_vloxei8_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, con
   return vloxei8_v_u16mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vloxei8_v_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2936,7 +2936,7 @@ vuint16mf2_t test_vloxei8_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, con
   return vloxei8_v_u16mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vloxei8_v_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2946,7 +2946,7 @@ vuint16m1_t test_vloxei8_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const
   return vloxei8_v_u16m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u16m2_m(
+// CHECK-RV64-LABEL: @test_vloxei8_v_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2956,7 +2956,7 @@ vuint16m2_t test_vloxei8_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const u
   return vloxei8_v_u16m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u16m4_m(
+// CHECK-RV64-LABEL: @test_vloxei8_v_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2966,7 +2966,7 @@ vuint16m4_t test_vloxei8_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const u
   return vloxei8_v_u16m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u16m8_m(
+// CHECK-RV64-LABEL: @test_vloxei8_v_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2976,7 +2976,7 @@ vuint16m8_t test_vloxei8_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const u
   return vloxei8_v_u16m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u16mf4_m(
+// CHECK-RV64-LABEL: @test_vloxei16_v_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2986,7 +2986,7 @@ vuint16mf4_t test_vloxei16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, co
   return vloxei16_v_u16mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vloxei16_v_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2996,7 +2996,7 @@ vuint16mf2_t test_vloxei16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, co
   return vloxei16_v_u16mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vloxei16_v_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3006,7 +3006,7 @@ vuint16m1_t test_vloxei16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const
   return vloxei16_v_u16m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u16m2_m(
+// CHECK-RV64-LABEL: @test_vloxei16_v_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3016,7 +3016,7 @@ vuint16m2_t test_vloxei16_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const
   return vloxei16_v_u16m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u16m4_m(
+// CHECK-RV64-LABEL: @test_vloxei16_v_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3026,7 +3026,7 @@ vuint16m4_t test_vloxei16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const
   return vloxei16_v_u16m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u16m8_m(
+// CHECK-RV64-LABEL: @test_vloxei16_v_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3036,7 +3036,7 @@ vuint16m8_t test_vloxei16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const
   return vloxei16_v_u16m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u16mf4_m(
+// CHECK-RV64-LABEL: @test_vloxei32_v_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3046,7 +3046,7 @@ vuint16mf4_t test_vloxei32_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, co
   return vloxei32_v_u16mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vloxei32_v_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3056,7 +3056,7 @@ vuint16mf2_t test_vloxei32_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, co
   return vloxei32_v_u16mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vloxei32_v_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3066,7 +3066,7 @@ vuint16m1_t test_vloxei32_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const
   return vloxei32_v_u16m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u16m2_m(
+// CHECK-RV64-LABEL: @test_vloxei32_v_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3076,7 +3076,7 @@ vuint16m2_t test_vloxei32_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const
   return vloxei32_v_u16m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u16m4_m(
+// CHECK-RV64-LABEL: @test_vloxei32_v_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3086,7 +3086,7 @@ vuint16m4_t test_vloxei32_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const
   return vloxei32_v_u16m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u16mf4_m(
+// CHECK-RV64-LABEL: @test_vloxei64_v_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3096,7 +3096,7 @@ vuint16mf4_t test_vloxei64_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, co
   return vloxei64_v_u16mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vloxei64_v_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3106,7 +3106,7 @@ vuint16mf2_t test_vloxei64_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, co
   return vloxei64_v_u16mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vloxei64_v_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3116,7 +3116,7 @@ vuint16m1_t test_vloxei64_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const
   return vloxei64_v_u16m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u16m2_m(
+// CHECK-RV64-LABEL: @test_vloxei64_v_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3126,7 +3126,7 @@ vuint16m2_t test_vloxei64_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const
   return vloxei64_v_u16m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxei8_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3136,7 +3136,7 @@ vuint32mf2_t test_vloxei8_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, con
   return vloxei8_v_u32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vloxei8_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3146,7 +3146,7 @@ vuint32m1_t test_vloxei8_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const
   return vloxei8_v_u32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u32m2_m(
+// CHECK-RV64-LABEL: @test_vloxei8_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3156,7 +3156,7 @@ vuint32m2_t test_vloxei8_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const
   return vloxei8_v_u32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u32m4_m(
+// CHECK-RV64-LABEL: @test_vloxei8_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3166,7 +3166,7 @@ vuint32m4_t test_vloxei8_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const u
   return vloxei8_v_u32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u32m8_m(
+// CHECK-RV64-LABEL: @test_vloxei8_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3176,7 +3176,7 @@ vuint32m8_t test_vloxei8_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const u
   return vloxei8_v_u32m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxei16_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3186,7 +3186,7 @@ vuint32mf2_t test_vloxei16_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, co
   return vloxei16_v_u32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vloxei16_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3196,7 +3196,7 @@ vuint32m1_t test_vloxei16_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const
   return vloxei16_v_u32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u32m2_m(
+// CHECK-RV64-LABEL: @test_vloxei16_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3206,7 +3206,7 @@ vuint32m2_t test_vloxei16_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const
   return vloxei16_v_u32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u32m4_m(
+// CHECK-RV64-LABEL: @test_vloxei16_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3216,7 +3216,7 @@ vuint32m4_t test_vloxei16_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const
   return vloxei16_v_u32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u32m8_m(
+// CHECK-RV64-LABEL: @test_vloxei16_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3226,7 +3226,7 @@ vuint32m8_t test_vloxei16_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const
   return vloxei16_v_u32m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxei32_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3236,7 +3236,7 @@ vuint32mf2_t test_vloxei32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, co
   return vloxei32_v_u32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vloxei32_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3246,7 +3246,7 @@ vuint32m1_t test_vloxei32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const
   return vloxei32_v_u32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u32m2_m(
+// CHECK-RV64-LABEL: @test_vloxei32_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3256,7 +3256,7 @@ vuint32m2_t test_vloxei32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const
   return vloxei32_v_u32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u32m4_m(
+// CHECK-RV64-LABEL: @test_vloxei32_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3266,7 +3266,7 @@ vuint32m4_t test_vloxei32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const
   return vloxei32_v_u32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u32m8_m(
+// CHECK-RV64-LABEL: @test_vloxei32_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3276,7 +3276,7 @@ vuint32m8_t test_vloxei32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const
   return vloxei32_v_u32m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxei64_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3286,7 +3286,7 @@ vuint32mf2_t test_vloxei64_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, co
   return vloxei64_v_u32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vloxei64_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3296,7 +3296,7 @@ vuint32m1_t test_vloxei64_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const
   return vloxei64_v_u32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u32m2_m(
+// CHECK-RV64-LABEL: @test_vloxei64_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3306,7 +3306,7 @@ vuint32m2_t test_vloxei64_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const
   return vloxei64_v_u32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u32m4_m(
+// CHECK-RV64-LABEL: @test_vloxei64_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3316,7 +3316,7 @@ vuint32m4_t test_vloxei64_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const
   return vloxei64_v_u32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vloxei8_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3326,7 +3326,7 @@ vuint64m1_t test_vloxei8_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const
   return vloxei8_v_u64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u64m2_m(
+// CHECK-RV64-LABEL: @test_vloxei8_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3336,7 +3336,7 @@ vuint64m2_t test_vloxei8_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const
   return vloxei8_v_u64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u64m4_m(
+// CHECK-RV64-LABEL: @test_vloxei8_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3346,7 +3346,7 @@ vuint64m4_t test_vloxei8_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const
   return vloxei8_v_u64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u64m8_m(
+// CHECK-RV64-LABEL: @test_vloxei8_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3356,7 +3356,7 @@ vuint64m8_t test_vloxei8_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const u
   return vloxei8_v_u64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vloxei16_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3366,7 +3366,7 @@ vuint64m1_t test_vloxei16_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const
   return vloxei16_v_u64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u64m2_m(
+// CHECK-RV64-LABEL: @test_vloxei16_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3376,7 +3376,7 @@ vuint64m2_t test_vloxei16_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const
   return vloxei16_v_u64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u64m4_m(
+// CHECK-RV64-LABEL: @test_vloxei16_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3386,7 +3386,7 @@ vuint64m4_t test_vloxei16_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const
   return vloxei16_v_u64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u64m8_m(
+// CHECK-RV64-LABEL: @test_vloxei16_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3396,7 +3396,7 @@ vuint64m8_t test_vloxei16_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const
   return vloxei16_v_u64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vloxei32_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3406,7 +3406,7 @@ vuint64m1_t test_vloxei32_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const
   return vloxei32_v_u64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u64m2_m(
+// CHECK-RV64-LABEL: @test_vloxei32_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3416,7 +3416,7 @@ vuint64m2_t test_vloxei32_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const
   return vloxei32_v_u64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u64m4_m(
+// CHECK-RV64-LABEL: @test_vloxei32_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3426,7 +3426,7 @@ vuint64m4_t test_vloxei32_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const
   return vloxei32_v_u64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u64m8_m(
+// CHECK-RV64-LABEL: @test_vloxei32_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3436,7 +3436,7 @@ vuint64m8_t test_vloxei32_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const
   return vloxei32_v_u64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vloxei64_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3446,7 +3446,7 @@ vuint64m1_t test_vloxei64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const
   return vloxei64_v_u64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u64m2_m(
+// CHECK-RV64-LABEL: @test_vloxei64_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3456,7 +3456,7 @@ vuint64m2_t test_vloxei64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const
   return vloxei64_v_u64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u64m4_m(
+// CHECK-RV64-LABEL: @test_vloxei64_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3466,7 +3466,7 @@ vuint64m4_t test_vloxei64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const
   return vloxei64_v_u64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u64m8_m(
+// CHECK-RV64-LABEL: @test_vloxei64_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3476,7 +3476,7 @@ vuint64m8_t test_vloxei64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const
   return vloxei64_v_u64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxei8_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i8.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3486,7 +3486,7 @@ vfloat32mf2_t test_vloxei8_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, c
   return vloxei8_v_f32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vloxei8_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i8.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3496,7 +3496,7 @@ vfloat32m1_t test_vloxei8_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, cons
   return vloxei8_v_f32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vloxei8_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i8.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3506,7 +3506,7 @@ vfloat32m2_t test_vloxei8_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, cons
   return vloxei8_v_f32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_f32m4_m(
+// CHECK-RV64-LABEL: @test_vloxei8_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i8.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3516,7 +3516,7 @@ vfloat32m4_t test_vloxei8_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const
   return vloxei8_v_f32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_f32m8_m(
+// CHECK-RV64-LABEL: @test_vloxei8_v_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i8.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3526,7 +3526,7 @@ vfloat32m8_t test_vloxei8_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const
   return vloxei8_v_f32m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxei16_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i16.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3536,7 +3536,7 @@ vfloat32mf2_t test_vloxei16_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
   return vloxei16_v_f32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vloxei16_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i16.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3546,7 +3546,7 @@ vfloat32m1_t test_vloxei16_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, con
   return vloxei16_v_f32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vloxei16_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i16.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3556,7 +3556,7 @@ vfloat32m2_t test_vloxei16_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, con
   return vloxei16_v_f32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_f32m4_m(
+// CHECK-RV64-LABEL: @test_vloxei16_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i16.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3566,7 +3566,7 @@ vfloat32m4_t test_vloxei16_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, cons
   return vloxei16_v_f32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_f32m8_m(
+// CHECK-RV64-LABEL: @test_vloxei16_v_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i16.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3576,7 +3576,7 @@ vfloat32m8_t test_vloxei16_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, cons
   return vloxei16_v_f32m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxei32_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3586,7 +3586,7 @@ vfloat32mf2_t test_vloxei32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
   return vloxei32_v_f32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vloxei32_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3596,7 +3596,7 @@ vfloat32m1_t test_vloxei32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, con
   return vloxei32_v_f32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vloxei32_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3606,7 +3606,7 @@ vfloat32m2_t test_vloxei32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, con
   return vloxei32_v_f32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_f32m4_m(
+// CHECK-RV64-LABEL: @test_vloxei32_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3616,7 +3616,7 @@ vfloat32m4_t test_vloxei32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, cons
   return vloxei32_v_f32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_f32m8_m(
+// CHECK-RV64-LABEL: @test_vloxei32_v_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3626,7 +3626,7 @@ vfloat32m8_t test_vloxei32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, cons
   return vloxei32_v_f32m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxei64_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i64.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3636,7 +3636,7 @@ vfloat32mf2_t test_vloxei64_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
   return vloxei64_v_f32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vloxei64_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i64.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3646,7 +3646,7 @@ vfloat32m1_t test_vloxei64_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, con
   return vloxei64_v_f32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vloxei64_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i64.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3656,7 +3656,7 @@ vfloat32m2_t test_vloxei64_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, con
   return vloxei64_v_f32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_f32m4_m(
+// CHECK-RV64-LABEL: @test_vloxei64_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i64.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3666,7 +3666,7 @@ vfloat32m4_t test_vloxei64_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, cons
   return vloxei64_v_f32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vloxei8_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i8.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3676,7 +3676,7 @@ vfloat64m1_t test_vloxei8_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, cons
   return vloxei8_v_f64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_f64m2_m(
+// CHECK-RV64-LABEL: @test_vloxei8_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i8.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3686,7 +3686,7 @@ vfloat64m2_t test_vloxei8_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, cons
   return vloxei8_v_f64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_f64m4_m(
+// CHECK-RV64-LABEL: @test_vloxei8_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i8.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3696,7 +3696,7 @@ vfloat64m4_t test_vloxei8_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, cons
   return vloxei8_v_f64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_f64m8_m(
+// CHECK-RV64-LABEL: @test_vloxei8_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i8.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3706,7 +3706,7 @@ vfloat64m8_t test_vloxei8_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const
   return vloxei8_v_f64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vloxei16_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i16.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3716,7 +3716,7 @@ vfloat64m1_t test_vloxei16_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, con
   return vloxei16_v_f64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_f64m2_m(
+// CHECK-RV64-LABEL: @test_vloxei16_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i16.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3726,7 +3726,7 @@ vfloat64m2_t test_vloxei16_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, con
   return vloxei16_v_f64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_f64m4_m(
+// CHECK-RV64-LABEL: @test_vloxei16_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i16.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3736,7 +3736,7 @@ vfloat64m4_t test_vloxei16_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, con
   return vloxei16_v_f64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_f64m8_m(
+// CHECK-RV64-LABEL: @test_vloxei16_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i16.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3746,7 +3746,7 @@ vfloat64m8_t test_vloxei16_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, cons
   return vloxei16_v_f64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vloxei32_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3756,7 +3756,7 @@ vfloat64m1_t test_vloxei32_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, con
   return vloxei32_v_f64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_f64m2_m(
+// CHECK-RV64-LABEL: @test_vloxei32_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i32.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3766,7 +3766,7 @@ vfloat64m2_t test_vloxei32_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, con
   return vloxei32_v_f64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_f64m4_m(
+// CHECK-RV64-LABEL: @test_vloxei32_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i32.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3776,7 +3776,7 @@ vfloat64m4_t test_vloxei32_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, con
   return vloxei32_v_f64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_f64m8_m(
+// CHECK-RV64-LABEL: @test_vloxei32_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3786,7 +3786,7 @@ vfloat64m8_t test_vloxei32_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, cons
   return vloxei32_v_f64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vloxei64_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3796,7 +3796,7 @@ vfloat64m1_t test_vloxei64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, con
   return vloxei64_v_f64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_f64m2_m(
+// CHECK-RV64-LABEL: @test_vloxei64_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3806,7 +3806,7 @@ vfloat64m2_t test_vloxei64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, con
   return vloxei64_v_f64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_f64m4_m(
+// CHECK-RV64-LABEL: @test_vloxei64_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3816,7 +3816,7 @@ vfloat64m4_t test_vloxei64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, con
   return vloxei64_v_f64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_f64m8_m(
+// CHECK-RV64-LABEL: @test_vloxei64_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxei.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxei.c
index ece36400659e7..e59c3368e925a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxei.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxei.c
@@ -6,7 +6,7 @@
 
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: @testuxei8_v_i8mf8(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i8.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -16,7 +16,7 @@ vint8mf8_t test_vluxei8_v_i8mf8(const int8_t *base, vuint8mf8_t bindex, size_t v
   return vluxei8_v_i8mf8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i8mf4(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i8.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -26,7 +26,7 @@ vint8mf4_t test_vluxei8_v_i8mf4(const int8_t *base, vuint8mf4_t bindex, size_t v
   return vluxei8_v_i8mf4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i8mf2(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i8.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -36,7 +36,7 @@ vint8mf2_t test_vluxei8_v_i8mf2(const int8_t *base, vuint8mf2_t bindex, size_t v
   return vluxei8_v_i8mf2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i8m1(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i8.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -46,7 +46,7 @@ vint8m1_t test_vluxei8_v_i8m1(const int8_t *base, vuint8m1_t bindex, size_t vl)
   return vluxei8_v_i8m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i8m2(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i8.i64(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -56,7 +56,7 @@ vint8m2_t test_vluxei8_v_i8m2(const int8_t *base, vuint8m2_t bindex, size_t vl)
   return vluxei8_v_i8m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i8m4(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i8.i64(<vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -66,7 +66,7 @@ vint8m4_t test_vluxei8_v_i8m4(const int8_t *base, vuint8m4_t bindex, size_t vl)
   return vluxei8_v_i8m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i8m8(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vluxei.nxv64i8.nxv64i8.i64(<vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -76,7 +76,7 @@ vint8m8_t test_vluxei8_v_i8m8(const int8_t *base, vuint8m8_t bindex, size_t vl)
   return vluxei8_v_i8m8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i8mf8(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i16.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -86,7 +86,7 @@ vint8mf8_t test_vluxei16_v_i8mf8(const int8_t *base, vuint16mf4_t bindex, size_t
   return vluxei16_v_i8mf8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i8mf4(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i16.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -96,7 +96,7 @@ vint8mf4_t test_vluxei16_v_i8mf4(const int8_t *base, vuint16mf2_t bindex, size_t
   return vluxei16_v_i8mf4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i8mf2(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i16.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -106,7 +106,7 @@ vint8mf2_t test_vluxei16_v_i8mf2(const int8_t *base, vuint16m1_t bindex, size_t
   return vluxei16_v_i8mf2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i8m1(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i16.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -116,7 +116,7 @@ vint8m1_t test_vluxei16_v_i8m1(const int8_t *base, vuint16m2_t bindex, size_t vl
   return vluxei16_v_i8m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i8m2(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i16.i64(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -126,7 +126,7 @@ vint8m2_t test_vluxei16_v_i8m2(const int8_t *base, vuint16m4_t bindex, size_t vl
   return vluxei16_v_i8m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i8m4(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i16.i64(<vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -136,7 +136,7 @@ vint8m4_t test_vluxei16_v_i8m4(const int8_t *base, vuint16m8_t bindex, size_t vl
   return vluxei16_v_i8m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i8mf8(
+// CHECK-RV64-LABEL: @test_vluxei32_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i32.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -146,7 +146,7 @@ vint8mf8_t test_vluxei32_v_i8mf8(const int8_t *base, vuint32mf2_t bindex, size_t
   return vluxei32_v_i8mf8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i8mf4(
+// CHECK-RV64-LABEL: @test_vluxei32_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i32.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -156,7 +156,7 @@ vint8mf4_t test_vluxei32_v_i8mf4(const int8_t *base, vuint32m1_t bindex, size_t
   return vluxei32_v_i8mf4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i8mf2(
+// CHECK-RV64-LABEL: @test_vluxei32_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i32.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -166,7 +166,7 @@ vint8mf2_t test_vluxei32_v_i8mf2(const int8_t *base, vuint32m2_t bindex, size_t
   return vluxei32_v_i8mf2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i8m1(
+// CHECK-RV64-LABEL: @test_vluxei32_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i32.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -176,7 +176,7 @@ vint8m1_t test_vluxei32_v_i8m1(const int8_t *base, vuint32m4_t bindex, size_t vl
   return vluxei32_v_i8m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i8m2(
+// CHECK-RV64-LABEL: @test_vluxei32_v_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i32.i64(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -186,7 +186,7 @@ vint8m2_t test_vluxei32_v_i8m2(const int8_t *base, vuint32m8_t bindex, size_t vl
   return vluxei32_v_i8m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i8mf8(
+// CHECK-RV64-LABEL: @test_vluxei64_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i64.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -196,7 +196,7 @@ vint8mf8_t test_vluxei64_v_i8mf8(const int8_t *base, vuint64m1_t bindex, size_t
   return vluxei64_v_i8mf8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i8mf4(
+// CHECK-RV64-LABEL: @test_vluxei64_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i64.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -206,7 +206,7 @@ vint8mf4_t test_vluxei64_v_i8mf4(const int8_t *base, vuint64m2_t bindex, size_t
   return vluxei64_v_i8mf4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i8mf2(
+// CHECK-RV64-LABEL: @test_vluxei64_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i64.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -216,7 +216,7 @@ vint8mf2_t test_vluxei64_v_i8mf2(const int8_t *base, vuint64m4_t bindex, size_t
   return vluxei64_v_i8mf2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i8m1(
+// CHECK-RV64-LABEL: @test_vluxei64_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i64.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -226,7 +226,7 @@ vint8m1_t test_vluxei64_v_i8m1(const int8_t *base, vuint64m8_t bindex, size_t vl
   return vluxei64_v_i8m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i8.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -236,7 +236,7 @@ vint16mf4_t test_vluxei8_v_i16mf4(const int16_t *base, vuint8mf8_t bindex, size_
   return vluxei8_v_i16mf4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i16mf2(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i8.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -246,7 +246,7 @@ vint16mf2_t test_vluxei8_v_i16mf2(const int16_t *base, vuint8mf4_t bindex, size_
   return vluxei8_v_i16mf2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i16m1(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i8.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -256,7 +256,7 @@ vint16m1_t test_vluxei8_v_i16m1(const int16_t *base, vuint8mf2_t bindex, size_t
   return vluxei8_v_i16m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i16m2(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i8.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -266,7 +266,7 @@ vint16m2_t test_vluxei8_v_i16m2(const int16_t *base, vuint8m1_t bindex, size_t v
   return vluxei8_v_i16m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i16m4(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i8.i64(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -276,7 +276,7 @@ vint16m4_t test_vluxei8_v_i16m4(const int16_t *base, vuint8m2_t bindex, size_t v
   return vluxei8_v_i16m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i16m8(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i8.i64(<vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -286,7 +286,7 @@ vint16m8_t test_vluxei8_v_i16m8(const int16_t *base, vuint8m4_t bindex, size_t v
   return vluxei8_v_i16m8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i16.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -296,7 +296,7 @@ vint16mf4_t test_vluxei16_v_i16mf4(const int16_t *base, vuint16mf4_t bindex, siz
   return vluxei16_v_i16mf4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i16mf2(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i16.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -306,7 +306,7 @@ vint16mf2_t test_vluxei16_v_i16mf2(const int16_t *base, vuint16mf2_t bindex, siz
   return vluxei16_v_i16mf2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i16m1(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i16.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -316,7 +316,7 @@ vint16m1_t test_vluxei16_v_i16m1(const int16_t *base, vuint16m1_t bindex, size_t
   return vluxei16_v_i16m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i16m2(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i16.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -326,7 +326,7 @@ vint16m2_t test_vluxei16_v_i16m2(const int16_t *base, vuint16m2_t bindex, size_t
   return vluxei16_v_i16m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i16m4(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i16.i64(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -336,7 +336,7 @@ vint16m4_t test_vluxei16_v_i16m4(const int16_t *base, vuint16m4_t bindex, size_t
   return vluxei16_v_i16m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i16m8(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i16.i64(<vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -346,7 +346,7 @@ vint16m8_t test_vluxei16_v_i16m8(const int16_t *base, vuint16m8_t bindex, size_t
   return vluxei16_v_i16m8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vluxei32_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i32.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -356,7 +356,7 @@ vint16mf4_t test_vluxei32_v_i16mf4(const int16_t *base, vuint32mf2_t bindex, siz
   return vluxei32_v_i16mf4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i16mf2(
+// CHECK-RV64-LABEL: @test_vluxei32_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i32.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -366,7 +366,7 @@ vint16mf2_t test_vluxei32_v_i16mf2(const int16_t *base, vuint32m1_t bindex, size
   return vluxei32_v_i16mf2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i16m1(
+// CHECK-RV64-LABEL: @test_vluxei32_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i32.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -376,7 +376,7 @@ vint16m1_t test_vluxei32_v_i16m1(const int16_t *base, vuint32m2_t bindex, size_t
   return vluxei32_v_i16m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i16m2(
+// CHECK-RV64-LABEL: @test_vluxei32_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i32.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -386,7 +386,7 @@ vint16m2_t test_vluxei32_v_i16m2(const int16_t *base, vuint32m4_t bindex, size_t
   return vluxei32_v_i16m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i16m4(
+// CHECK-RV64-LABEL: @test_vluxei32_v_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i32.i64(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -396,7 +396,7 @@ vint16m4_t test_vluxei32_v_i16m4(const int16_t *base, vuint32m8_t bindex, size_t
   return vluxei32_v_i16m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vluxei64_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i64.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -406,7 +406,7 @@ vint16mf4_t test_vluxei64_v_i16mf4(const int16_t *base, vuint64m1_t bindex, size
   return vluxei64_v_i16mf4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i16mf2(
+// CHECK-RV64-LABEL: @test_vluxei64_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i64.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -416,7 +416,7 @@ vint16mf2_t test_vluxei64_v_i16mf2(const int16_t *base, vuint64m2_t bindex, size
   return vluxei64_v_i16mf2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i16m1(
+// CHECK-RV64-LABEL: @test_vluxei64_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i64.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -426,7 +426,7 @@ vint16m1_t test_vluxei64_v_i16m1(const int16_t *base, vuint64m4_t bindex, size_t
   return vluxei64_v_i16m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i16m2(
+// CHECK-RV64-LABEL: @test_vluxei64_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i64.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -436,7 +436,7 @@ vint16m2_t test_vluxei64_v_i16m2(const int16_t *base, vuint64m8_t bindex, size_t
   return vluxei64_v_i16m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i32mf2(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -446,7 +446,7 @@ vint32mf2_t test_vluxei8_v_i32mf2(const int32_t *base, vuint8mf8_t bindex, size_
   return vluxei8_v_i32mf2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i32m1(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -456,7 +456,7 @@ vint32m1_t test_vluxei8_v_i32m1(const int32_t *base, vuint8mf4_t bindex, size_t
   return vluxei8_v_i32m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i32m2(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -466,7 +466,7 @@ vint32m2_t test_vluxei8_v_i32m2(const int32_t *base, vuint8mf2_t bindex, size_t
   return vluxei8_v_i32m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i32m4(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -476,7 +476,7 @@ vint32m4_t test_vluxei8_v_i32m4(const int32_t *base, vuint8m1_t bindex, size_t v
   return vluxei8_v_i32m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i32m8(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -486,7 +486,7 @@ vint32m8_t test_vluxei8_v_i32m8(const int32_t *base, vuint8m2_t bindex, size_t v
   return vluxei8_v_i32m8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i32mf2(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -496,7 +496,7 @@ vint32mf2_t test_vluxei16_v_i32mf2(const int32_t *base, vuint16mf4_t bindex, siz
   return vluxei16_v_i32mf2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i32m1(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -506,7 +506,7 @@ vint32m1_t test_vluxei16_v_i32m1(const int32_t *base, vuint16mf2_t bindex, size_
   return vluxei16_v_i32m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i32m2(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -516,7 +516,7 @@ vint32m2_t test_vluxei16_v_i32m2(const int32_t *base, vuint16m1_t bindex, size_t
   return vluxei16_v_i32m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i32m4(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -526,7 +526,7 @@ vint32m4_t test_vluxei16_v_i32m4(const int32_t *base, vuint16m2_t bindex, size_t
   return vluxei16_v_i32m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i32m8(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -536,7 +536,7 @@ vint32m8_t test_vluxei16_v_i32m8(const int32_t *base, vuint16m4_t bindex, size_t
   return vluxei16_v_i32m8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i32mf2(
+// CHECK-RV64-LABEL: @test_vluxei32_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -546,7 +546,7 @@ vint32mf2_t test_vluxei32_v_i32mf2(const int32_t *base, vuint32mf2_t bindex, siz
   return vluxei32_v_i32mf2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i32m1(
+// CHECK-RV64-LABEL: @test_vluxei32_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -556,7 +556,7 @@ vint32m1_t test_vluxei32_v_i32m1(const int32_t *base, vuint32m1_t bindex, size_t
   return vluxei32_v_i32m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i32m2(
+// CHECK-RV64-LABEL: @test_vluxei32_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -566,7 +566,7 @@ vint32m2_t test_vluxei32_v_i32m2(const int32_t *base, vuint32m2_t bindex, size_t
   return vluxei32_v_i32m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i32m4(
+// CHECK-RV64-LABEL: @test_vluxei32_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -576,7 +576,7 @@ vint32m4_t test_vluxei32_v_i32m4(const int32_t *base, vuint32m4_t bindex, size_t
   return vluxei32_v_i32m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i32m8(
+// CHECK-RV64-LABEL: @test_vluxei32_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -586,7 +586,7 @@ vint32m8_t test_vluxei32_v_i32m8(const int32_t *base, vuint32m8_t bindex, size_t
   return vluxei32_v_i32m8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i32mf2(
+// CHECK-RV64-LABEL: @test_vluxei64_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -596,7 +596,7 @@ vint32mf2_t test_vluxei64_v_i32mf2(const int32_t *base, vuint64m1_t bindex, size
   return vluxei64_v_i32mf2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i32m1(
+// CHECK-RV64-LABEL: @test_vluxei64_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -606,7 +606,7 @@ vint32m1_t test_vluxei64_v_i32m1(const int32_t *base, vuint64m2_t bindex, size_t
   return vluxei64_v_i32m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i32m2(
+// CHECK-RV64-LABEL: @test_vluxei64_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -616,7 +616,7 @@ vint32m2_t test_vluxei64_v_i32m2(const int32_t *base, vuint64m4_t bindex, size_t
   return vluxei64_v_i32m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i32m4(
+// CHECK-RV64-LABEL: @test_vluxei64_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -626,7 +626,7 @@ vint32m4_t test_vluxei64_v_i32m4(const int32_t *base, vuint64m8_t bindex, size_t
   return vluxei64_v_i32m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i64m1(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -636,7 +636,7 @@ vint64m1_t test_vluxei8_v_i64m1(const int64_t *base, vuint8mf8_t bindex, size_t
   return vluxei8_v_i64m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i64m2(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -646,7 +646,7 @@ vint64m2_t test_vluxei8_v_i64m2(const int64_t *base, vuint8mf4_t bindex, size_t
   return vluxei8_v_i64m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i64m4(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -656,7 +656,7 @@ vint64m4_t test_vluxei8_v_i64m4(const int64_t *base, vuint8mf2_t bindex, size_t
   return vluxei8_v_i64m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i64m8(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -666,7 +666,7 @@ vint64m8_t test_vluxei8_v_i64m8(const int64_t *base, vuint8m1_t bindex, size_t v
   return vluxei8_v_i64m8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i64m1(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -676,7 +676,7 @@ vint64m1_t test_vluxei16_v_i64m1(const int64_t *base, vuint16mf4_t bindex, size_
   return vluxei16_v_i64m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i64m2(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -686,7 +686,7 @@ vint64m2_t test_vluxei16_v_i64m2(const int64_t *base, vuint16mf2_t bindex, size_
   return vluxei16_v_i64m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i64m4(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -696,7 +696,7 @@ vint64m4_t test_vluxei16_v_i64m4(const int64_t *base, vuint16m1_t bindex, size_t
   return vluxei16_v_i64m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i64m8(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -706,7 +706,7 @@ vint64m8_t test_vluxei16_v_i64m8(const int64_t *base, vuint16m2_t bindex, size_t
   return vluxei16_v_i64m8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i64m1(
+// CHECK-RV64-LABEL: @test_vluxei32_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -716,7 +716,7 @@ vint64m1_t test_vluxei32_v_i64m1(const int64_t *base, vuint32mf2_t bindex, size_
   return vluxei32_v_i64m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i64m2(
+// CHECK-RV64-LABEL: @test_vluxei32_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -726,7 +726,7 @@ vint64m2_t test_vluxei32_v_i64m2(const int64_t *base, vuint32m1_t bindex, size_t
   return vluxei32_v_i64m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i64m4(
+// CHECK-RV64-LABEL: @test_vluxei32_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -736,7 +736,7 @@ vint64m4_t test_vluxei32_v_i64m4(const int64_t *base, vuint32m2_t bindex, size_t
   return vluxei32_v_i64m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i64m8(
+// CHECK-RV64-LABEL: @test_vluxei32_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -746,7 +746,7 @@ vint64m8_t test_vluxei32_v_i64m8(const int64_t *base, vuint32m4_t bindex, size_t
   return vluxei32_v_i64m8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i64m1(
+// CHECK-RV64-LABEL: @test_vluxei64_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -756,7 +756,7 @@ vint64m1_t test_vluxei64_v_i64m1(const int64_t *base, vuint64m1_t bindex, size_t
   return vluxei64_v_i64m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i64m2(
+// CHECK-RV64-LABEL: @test_vluxei64_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -766,7 +766,7 @@ vint64m2_t test_vluxei64_v_i64m2(const int64_t *base, vuint64m2_t bindex, size_t
   return vluxei64_v_i64m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i64m4(
+// CHECK-RV64-LABEL: @test_vluxei64_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -776,7 +776,7 @@ vint64m4_t test_vluxei64_v_i64m4(const int64_t *base, vuint64m4_t bindex, size_t
   return vluxei64_v_i64m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i64m8(
+// CHECK-RV64-LABEL: @test_vluxei64_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -786,7 +786,7 @@ vint64m8_t test_vluxei64_v_i64m8(const int64_t *base, vuint64m8_t bindex, size_t
   return vluxei64_v_i64m8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u8mf8(
+// CHECK-RV64-LABEL: @test_vluxei8_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i8.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -796,7 +796,7 @@ vuint8mf8_t test_vluxei8_v_u8mf8(const uint8_t *base, vuint8mf8_t bindex, size_t
   return vluxei8_v_u8mf8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u8mf4(
+// CHECK-RV64-LABEL: @test_vluxei8_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i8.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -806,7 +806,7 @@ vuint8mf4_t test_vluxei8_v_u8mf4(const uint8_t *base, vuint8mf4_t bindex, size_t
   return vluxei8_v_u8mf4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u8mf2(
+// CHECK-RV64-LABEL: @test_vluxei8_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i8.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -816,7 +816,7 @@ vuint8mf2_t test_vluxei8_v_u8mf2(const uint8_t *base, vuint8mf2_t bindex, size_t
   return vluxei8_v_u8mf2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u8m1(
+// CHECK-RV64-LABEL: @test_vluxei8_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i8.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -826,7 +826,7 @@ vuint8m1_t test_vluxei8_v_u8m1(const uint8_t *base, vuint8m1_t bindex, size_t vl
   return vluxei8_v_u8m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u8m2(
+// CHECK-RV64-LABEL: @test_vluxei8_v_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i8.i64(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -836,7 +836,7 @@ vuint8m2_t test_vluxei8_v_u8m2(const uint8_t *base, vuint8m2_t bindex, size_t vl
   return vluxei8_v_u8m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u8m4(
+// CHECK-RV64-LABEL: @test_vluxei8_v_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i8.i64(<vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -846,7 +846,7 @@ vuint8m4_t test_vluxei8_v_u8m4(const uint8_t *base, vuint8m4_t bindex, size_t vl
   return vluxei8_v_u8m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u8m8(
+// CHECK-RV64-LABEL: @test_vluxei8_v_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vluxei.nxv64i8.nxv64i8.i64(<vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -856,7 +856,7 @@ vuint8m8_t test_vluxei8_v_u8m8(const uint8_t *base, vuint8m8_t bindex, size_t vl
   return vluxei8_v_u8m8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u8mf8(
+// CHECK-RV64-LABEL: @test_vluxei16_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i16.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -866,7 +866,7 @@ vuint8mf8_t test_vluxei16_v_u8mf8(const uint8_t *base, vuint16mf4_t bindex, size
   return vluxei16_v_u8mf8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u8mf4(
+// CHECK-RV64-LABEL: @test_vluxei16_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i16.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -876,7 +876,7 @@ vuint8mf4_t test_vluxei16_v_u8mf4(const uint8_t *base, vuint16mf2_t bindex, size
   return vluxei16_v_u8mf4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u8mf2(
+// CHECK-RV64-LABEL: @test_vluxei16_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i16.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -886,7 +886,7 @@ vuint8mf2_t test_vluxei16_v_u8mf2(const uint8_t *base, vuint16m1_t bindex, size_
   return vluxei16_v_u8mf2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u8m1(
+// CHECK-RV64-LABEL: @test_vluxei16_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i16.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -896,7 +896,7 @@ vuint8m1_t test_vluxei16_v_u8m1(const uint8_t *base, vuint16m2_t bindex, size_t
   return vluxei16_v_u8m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u8m2(
+// CHECK-RV64-LABEL: @test_vluxei16_v_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i16.i64(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -906,7 +906,7 @@ vuint8m2_t test_vluxei16_v_u8m2(const uint8_t *base, vuint16m4_t bindex, size_t
   return vluxei16_v_u8m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u8m4(
+// CHECK-RV64-LABEL: @test_vluxei16_v_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i16.i64(<vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -916,7 +916,7 @@ vuint8m4_t test_vluxei16_v_u8m4(const uint8_t *base, vuint16m8_t bindex, size_t
   return vluxei16_v_u8m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u8mf8(
+// CHECK-RV64-LABEL: @test_vluxei32_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i32.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -926,7 +926,7 @@ vuint8mf8_t test_vluxei32_v_u8mf8(const uint8_t *base, vuint32mf2_t bindex, size
   return vluxei32_v_u8mf8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u8mf4(
+// CHECK-RV64-LABEL: @test_vluxei32_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i32.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -936,7 +936,7 @@ vuint8mf4_t test_vluxei32_v_u8mf4(const uint8_t *base, vuint32m1_t bindex, size_
   return vluxei32_v_u8mf4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u8mf2(
+// CHECK-RV64-LABEL: @test_vluxei32_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i32.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -946,7 +946,7 @@ vuint8mf2_t test_vluxei32_v_u8mf2(const uint8_t *base, vuint32m2_t bindex, size_
   return vluxei32_v_u8mf2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u8m1(
+// CHECK-RV64-LABEL: @test_vluxei32_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i32.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -956,7 +956,7 @@ vuint8m1_t test_vluxei32_v_u8m1(const uint8_t *base, vuint32m4_t bindex, size_t
   return vluxei32_v_u8m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u8m2(
+// CHECK-RV64-LABEL: @test_vluxei32_v_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i32.i64(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -966,7 +966,7 @@ vuint8m2_t test_vluxei32_v_u8m2(const uint8_t *base, vuint32m8_t bindex, size_t
   return vluxei32_v_u8m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u8mf8(
+// CHECK-RV64-LABEL: @test_vluxei64_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i64.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -976,7 +976,7 @@ vuint8mf8_t test_vluxei64_v_u8mf8(const uint8_t *base, vuint64m1_t bindex, size_
   return vluxei64_v_u8mf8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u8mf4(
+// CHECK-RV64-LABEL: @test_vluxei64_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i64.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -986,7 +986,7 @@ vuint8mf4_t test_vluxei64_v_u8mf4(const uint8_t *base, vuint64m2_t bindex, size_
   return vluxei64_v_u8mf4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u8mf2(
+// CHECK-RV64-LABEL: @test_vluxei64_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i64.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -996,7 +996,7 @@ vuint8mf2_t test_vluxei64_v_u8mf2(const uint8_t *base, vuint64m4_t bindex, size_
   return vluxei64_v_u8mf2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u8m1(
+// CHECK-RV64-LABEL: @test_vluxei64_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i64.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1006,7 +1006,7 @@ vuint8m1_t test_vluxei64_v_u8m1(const uint8_t *base, vuint64m8_t bindex, size_t
   return vluxei64_v_u8m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u16mf4(
+// CHECK-RV64-LABEL: @test_vluxei8_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i8.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1016,7 +1016,7 @@ vuint16mf4_t test_vluxei8_v_u16mf4(const uint16_t *base, vuint8mf8_t bindex, siz
   return vluxei8_v_u16mf4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u16mf2(
+// CHECK-RV64-LABEL: @test_vluxei8_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i8.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1026,7 +1026,7 @@ vuint16mf2_t test_vluxei8_v_u16mf2(const uint16_t *base, vuint8mf4_t bindex, siz
   return vluxei8_v_u16mf2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u16m1(
+// CHECK-RV64-LABEL: @test_vluxei8_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i8.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1036,7 +1036,7 @@ vuint16m1_t test_vluxei8_v_u16m1(const uint16_t *base, vuint8mf2_t bindex, size_
   return vluxei8_v_u16m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u16m2(
+// CHECK-RV64-LABEL: @test_vluxei8_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i8.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1046,7 +1046,7 @@ vuint16m2_t test_vluxei8_v_u16m2(const uint16_t *base, vuint8m1_t bindex, size_t
   return vluxei8_v_u16m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u16m4(
+// CHECK-RV64-LABEL: @test_vluxei8_v_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i8.i64(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1056,7 +1056,7 @@ vuint16m4_t test_vluxei8_v_u16m4(const uint16_t *base, vuint8m2_t bindex, size_t
   return vluxei8_v_u16m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u16m8(
+// CHECK-RV64-LABEL: @test_vluxei8_v_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i8.i64(<vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1066,7 +1066,7 @@ vuint16m8_t test_vluxei8_v_u16m8(const uint16_t *base, vuint8m4_t bindex, size_t
   return vluxei8_v_u16m8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u16mf4(
+// CHECK-RV64-LABEL: @test_vluxei16_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i16.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1076,7 +1076,7 @@ vuint16mf4_t test_vluxei16_v_u16mf4(const uint16_t *base, vuint16mf4_t bindex, s
   return vluxei16_v_u16mf4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u16mf2(
+// CHECK-RV64-LABEL: @test_vluxei16_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i16.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1086,7 +1086,7 @@ vuint16mf2_t test_vluxei16_v_u16mf2(const uint16_t *base, vuint16mf2_t bindex, s
   return vluxei16_v_u16mf2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u16m1(
+// CHECK-RV64-LABEL: @test_vluxei16_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i16.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1096,7 +1096,7 @@ vuint16m1_t test_vluxei16_v_u16m1(const uint16_t *base, vuint16m1_t bindex, size
   return vluxei16_v_u16m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u16m2(
+// CHECK-RV64-LABEL: @test_vluxei16_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i16.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1106,7 +1106,7 @@ vuint16m2_t test_vluxei16_v_u16m2(const uint16_t *base, vuint16m2_t bindex, size
   return vluxei16_v_u16m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u16m4(
+// CHECK-RV64-LABEL: @test_vluxei16_v_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i16.i64(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1116,7 +1116,7 @@ vuint16m4_t test_vluxei16_v_u16m4(const uint16_t *base, vuint16m4_t bindex, size
   return vluxei16_v_u16m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u16m8(
+// CHECK-RV64-LABEL: @test_vluxei16_v_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i16.i64(<vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1126,7 +1126,7 @@ vuint16m8_t test_vluxei16_v_u16m8(const uint16_t *base, vuint16m8_t bindex, size
   return vluxei16_v_u16m8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u16mf4(
+// CHECK-RV64-LABEL: @test_vluxei32_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i32.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1136,7 +1136,7 @@ vuint16mf4_t test_vluxei32_v_u16mf4(const uint16_t *base, vuint32mf2_t bindex, s
   return vluxei32_v_u16mf4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u16mf2(
+// CHECK-RV64-LABEL: @test_vluxei32_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i32.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1146,7 +1146,7 @@ vuint16mf2_t test_vluxei32_v_u16mf2(const uint16_t *base, vuint32m1_t bindex, si
   return vluxei32_v_u16mf2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u16m1(
+// CHECK-RV64-LABEL: @test_vluxei32_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i32.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1156,7 +1156,7 @@ vuint16m1_t test_vluxei32_v_u16m1(const uint16_t *base, vuint32m2_t bindex, size
   return vluxei32_v_u16m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u16m2(
+// CHECK-RV64-LABEL: @test_vluxei32_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i32.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1166,7 +1166,7 @@ vuint16m2_t test_vluxei32_v_u16m2(const uint16_t *base, vuint32m4_t bindex, size
   return vluxei32_v_u16m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u16m4(
+// CHECK-RV64-LABEL: @test_vluxei32_v_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i32.i64(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1176,7 +1176,7 @@ vuint16m4_t test_vluxei32_v_u16m4(const uint16_t *base, vuint32m8_t bindex, size
   return vluxei32_v_u16m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u16mf4(
+// CHECK-RV64-LABEL: @test_vluxei64_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i64.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1186,7 +1186,7 @@ vuint16mf4_t test_vluxei64_v_u16mf4(const uint16_t *base, vuint64m1_t bindex, si
   return vluxei64_v_u16mf4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u16mf2(
+// CHECK-RV64-LABEL: @test_vluxei64_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i64.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1196,7 +1196,7 @@ vuint16mf2_t test_vluxei64_v_u16mf2(const uint16_t *base, vuint64m2_t bindex, si
   return vluxei64_v_u16mf2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u16m1(
+// CHECK-RV64-LABEL: @test_vluxei64_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i64.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1206,7 +1206,7 @@ vuint16m1_t test_vluxei64_v_u16m1(const uint16_t *base, vuint64m4_t bindex, size
   return vluxei64_v_u16m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u16m2(
+// CHECK-RV64-LABEL: @test_vluxei64_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i64.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1216,7 +1216,7 @@ vuint16m2_t test_vluxei64_v_u16m2(const uint16_t *base, vuint64m8_t bindex, size
   return vluxei64_v_u16m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vluxei8_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1226,7 +1226,7 @@ vuint32mf2_t test_vluxei8_v_u32mf2(const uint32_t *base, vuint8mf8_t bindex, siz
   return vluxei8_v_u32mf2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u32m1(
+// CHECK-RV64-LABEL: @test_vluxei8_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1236,7 +1236,7 @@ vuint32m1_t test_vluxei8_v_u32m1(const uint32_t *base, vuint8mf4_t bindex, size_
   return vluxei8_v_u32m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u32m2(
+// CHECK-RV64-LABEL: @test_vluxei8_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1246,7 +1246,7 @@ vuint32m2_t test_vluxei8_v_u32m2(const uint32_t *base, vuint8mf2_t bindex, size_
   return vluxei8_v_u32m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u32m4(
+// CHECK-RV64-LABEL: @test_vluxei8_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1256,7 +1256,7 @@ vuint32m4_t test_vluxei8_v_u32m4(const uint32_t *base, vuint8m1_t bindex, size_t
   return vluxei8_v_u32m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u32m8(
+// CHECK-RV64-LABEL: @test_vluxei8_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1266,7 +1266,7 @@ vuint32m8_t test_vluxei8_v_u32m8(const uint32_t *base, vuint8m2_t bindex, size_t
   return vluxei8_v_u32m8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vluxei16_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1276,7 +1276,7 @@ vuint32mf2_t test_vluxei16_v_u32mf2(const uint32_t *base, vuint16mf4_t bindex, s
   return vluxei16_v_u32mf2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u32m1(
+// CHECK-RV64-LABEL: @test_vluxei16_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1286,7 +1286,7 @@ vuint32m1_t test_vluxei16_v_u32m1(const uint32_t *base, vuint16mf2_t bindex, siz
   return vluxei16_v_u32m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u32m2(
+// CHECK-RV64-LABEL: @test_vluxei16_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1296,7 +1296,7 @@ vuint32m2_t test_vluxei16_v_u32m2(const uint32_t *base, vuint16m1_t bindex, size
   return vluxei16_v_u32m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u32m4(
+// CHECK-RV64-LABEL: @test_vluxei16_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1306,7 +1306,7 @@ vuint32m4_t test_vluxei16_v_u32m4(const uint32_t *base, vuint16m2_t bindex, size
   return vluxei16_v_u32m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u32m8(
+// CHECK-RV64-LABEL: @test_vluxei16_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1316,7 +1316,7 @@ vuint32m8_t test_vluxei16_v_u32m8(const uint32_t *base, vuint16m4_t bindex, size
   return vluxei16_v_u32m8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vluxei32_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1326,7 +1326,7 @@ vuint32mf2_t test_vluxei32_v_u32mf2(const uint32_t *base, vuint32mf2_t bindex, s
   return vluxei32_v_u32mf2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u32m1(
+// CHECK-RV64-LABEL: @test_vluxei32_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1336,7 +1336,7 @@ vuint32m1_t test_vluxei32_v_u32m1(const uint32_t *base, vuint32m1_t bindex, size
   return vluxei32_v_u32m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u32m2(
+// CHECK-RV64-LABEL: @test_vluxei32_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1346,7 +1346,7 @@ vuint32m2_t test_vluxei32_v_u32m2(const uint32_t *base, vuint32m2_t bindex, size
   return vluxei32_v_u32m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u32m4(
+// CHECK-RV64-LABEL: @test_vluxei32_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1356,7 +1356,7 @@ vuint32m4_t test_vluxei32_v_u32m4(const uint32_t *base, vuint32m4_t bindex, size
   return vluxei32_v_u32m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u32m8(
+// CHECK-RV64-LABEL: @test_vluxei32_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1366,7 +1366,7 @@ vuint32m8_t test_vluxei32_v_u32m8(const uint32_t *base, vuint32m8_t bindex, size
   return vluxei32_v_u32m8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vluxei64_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1376,7 +1376,7 @@ vuint32mf2_t test_vluxei64_v_u32mf2(const uint32_t *base, vuint64m1_t bindex, si
   return vluxei64_v_u32mf2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u32m1(
+// CHECK-RV64-LABEL: @test_vluxei64_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1386,7 +1386,7 @@ vuint32m1_t test_vluxei64_v_u32m1(const uint32_t *base, vuint64m2_t bindex, size
   return vluxei64_v_u32m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u32m2(
+// CHECK-RV64-LABEL: @test_vluxei64_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1396,7 +1396,7 @@ vuint32m2_t test_vluxei64_v_u32m2(const uint32_t *base, vuint64m4_t bindex, size
   return vluxei64_v_u32m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u32m4(
+// CHECK-RV64-LABEL: @test_vluxei64_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1406,7 +1406,7 @@ vuint32m4_t test_vluxei64_v_u32m4(const uint32_t *base, vuint64m8_t bindex, size
   return vluxei64_v_u32m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u64m1(
+// CHECK-RV64-LABEL: @test_vluxei8_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1416,7 +1416,7 @@ vuint64m1_t test_vluxei8_v_u64m1(const uint64_t *base, vuint8mf8_t bindex, size_
   return vluxei8_v_u64m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u64m2(
+// CHECK-RV64-LABEL: @test_vluxei8_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1426,7 +1426,7 @@ vuint64m2_t test_vluxei8_v_u64m2(const uint64_t *base, vuint8mf4_t bindex, size_
   return vluxei8_v_u64m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u64m4(
+// CHECK-RV64-LABEL: @test_vluxei8_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1436,7 +1436,7 @@ vuint64m4_t test_vluxei8_v_u64m4(const uint64_t *base, vuint8mf2_t bindex, size_
   return vluxei8_v_u64m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u64m8(
+// CHECK-RV64-LABEL: @test_vluxei8_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1446,7 +1446,7 @@ vuint64m8_t test_vluxei8_v_u64m8(const uint64_t *base, vuint8m1_t bindex, size_t
   return vluxei8_v_u64m8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u64m1(
+// CHECK-RV64-LABEL: @test_vluxei16_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1456,7 +1456,7 @@ vuint64m1_t test_vluxei16_v_u64m1(const uint64_t *base, vuint16mf4_t bindex, siz
   return vluxei16_v_u64m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u64m2(
+// CHECK-RV64-LABEL: @test_vluxei16_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1466,7 +1466,7 @@ vuint64m2_t test_vluxei16_v_u64m2(const uint64_t *base, vuint16mf2_t bindex, siz
   return vluxei16_v_u64m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u64m4(
+// CHECK-RV64-LABEL: @test_vluxei16_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1476,7 +1476,7 @@ vuint64m4_t test_vluxei16_v_u64m4(const uint64_t *base, vuint16m1_t bindex, size
   return vluxei16_v_u64m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u64m8(
+// CHECK-RV64-LABEL: @test_vluxei16_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1486,7 +1486,7 @@ vuint64m8_t test_vluxei16_v_u64m8(const uint64_t *base, vuint16m2_t bindex, size
   return vluxei16_v_u64m8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u64m1(
+// CHECK-RV64-LABEL: @test_vluxei32_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1496,7 +1496,7 @@ vuint64m1_t test_vluxei32_v_u64m1(const uint64_t *base, vuint32mf2_t bindex, siz
   return vluxei32_v_u64m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u64m2(
+// CHECK-RV64-LABEL: @test_vluxei32_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1506,7 +1506,7 @@ vuint64m2_t test_vluxei32_v_u64m2(const uint64_t *base, vuint32m1_t bindex, size
   return vluxei32_v_u64m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u64m4(
+// CHECK-RV64-LABEL: @test_vluxei32_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1516,7 +1516,7 @@ vuint64m4_t test_vluxei32_v_u64m4(const uint64_t *base, vuint32m2_t bindex, size
   return vluxei32_v_u64m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u64m8(
+// CHECK-RV64-LABEL: @test_vluxei32_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1526,7 +1526,7 @@ vuint64m8_t test_vluxei32_v_u64m8(const uint64_t *base, vuint32m4_t bindex, size
   return vluxei32_v_u64m8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u64m1(
+// CHECK-RV64-LABEL: @test_vluxei64_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1536,7 +1536,7 @@ vuint64m1_t test_vluxei64_v_u64m1(const uint64_t *base, vuint64m1_t bindex, size
   return vluxei64_v_u64m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u64m2(
+// CHECK-RV64-LABEL: @test_vluxei64_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1546,7 +1546,7 @@ vuint64m2_t test_vluxei64_v_u64m2(const uint64_t *base, vuint64m2_t bindex, size
   return vluxei64_v_u64m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u64m4(
+// CHECK-RV64-LABEL: @test_vluxei64_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1556,7 +1556,7 @@ vuint64m4_t test_vluxei64_v_u64m4(const uint64_t *base, vuint64m4_t bindex, size
   return vluxei64_v_u64m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u64m8(
+// CHECK-RV64-LABEL: @test_vluxei64_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1566,7 +1566,7 @@ vuint64m8_t test_vluxei64_v_u64m8(const uint64_t *base, vuint64m8_t bindex, size
   return vluxei64_v_u64m8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vluxei8_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i8.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1576,7 +1576,7 @@ vfloat32mf2_t test_vluxei8_v_f32mf2(const float *base, vuint8mf8_t bindex, size_
   return vluxei8_v_f32mf2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_f32m1(
+// CHECK-RV64-LABEL: @test_vluxei8_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i8.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1586,7 +1586,7 @@ vfloat32m1_t test_vluxei8_v_f32m1(const float *base, vuint8mf4_t bindex, size_t
   return vluxei8_v_f32m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_f32m2(
+// CHECK-RV64-LABEL: @test_vluxei8_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i8.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1596,7 +1596,7 @@ vfloat32m2_t test_vluxei8_v_f32m2(const float *base, vuint8mf2_t bindex, size_t
   return vluxei8_v_f32m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_f32m4(
+// CHECK-RV64-LABEL: @test_vluxei8_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i8.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1606,7 +1606,7 @@ vfloat32m4_t test_vluxei8_v_f32m4(const float *base, vuint8m1_t bindex, size_t v
   return vluxei8_v_f32m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_f32m8(
+// CHECK-RV64-LABEL: @test_vluxei8_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i8.i64(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1616,7 +1616,7 @@ vfloat32m8_t test_vluxei8_v_f32m8(const float *base, vuint8m2_t bindex, size_t v
   return vluxei8_v_f32m8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vluxei16_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i16.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1626,7 +1626,7 @@ vfloat32mf2_t test_vluxei16_v_f32mf2(const float *base, vuint16mf4_t bindex, siz
   return vluxei16_v_f32mf2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_f32m1(
+// CHECK-RV64-LABEL: @test_vluxei16_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i16.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1636,7 +1636,7 @@ vfloat32m1_t test_vluxei16_v_f32m1(const float *base, vuint16mf2_t bindex, size_
   return vluxei16_v_f32m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_f32m2(
+// CHECK-RV64-LABEL: @test_vluxei16_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i16.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1646,7 +1646,7 @@ vfloat32m2_t test_vluxei16_v_f32m2(const float *base, vuint16m1_t bindex, size_t
   return vluxei16_v_f32m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_f32m4(
+// CHECK-RV64-LABEL: @test_vluxei16_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i16.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1656,7 +1656,7 @@ vfloat32m4_t test_vluxei16_v_f32m4(const float *base, vuint16m2_t bindex, size_t
   return vluxei16_v_f32m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_f32m8(
+// CHECK-RV64-LABEL: @test_vluxei16_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i16.i64(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1666,7 +1666,7 @@ vfloat32m8_t test_vluxei16_v_f32m8(const float *base, vuint16m4_t bindex, size_t
   return vluxei16_v_f32m8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vluxei32_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i32.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1676,7 +1676,7 @@ vfloat32mf2_t test_vluxei32_v_f32mf2(const float *base, vuint32mf2_t bindex, siz
   return vluxei32_v_f32mf2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_f32m1(
+// CHECK-RV64-LABEL: @test_vluxei32_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i32.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1686,7 +1686,7 @@ vfloat32m1_t test_vluxei32_v_f32m1(const float *base, vuint32m1_t bindex, size_t
   return vluxei32_v_f32m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_f32m2(
+// CHECK-RV64-LABEL: @test_vluxei32_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i32.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1696,7 +1696,7 @@ vfloat32m2_t test_vluxei32_v_f32m2(const float *base, vuint32m2_t bindex, size_t
   return vluxei32_v_f32m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_f32m4(
+// CHECK-RV64-LABEL: @test_vluxei32_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i32.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1706,7 +1706,7 @@ vfloat32m4_t test_vluxei32_v_f32m4(const float *base, vuint32m4_t bindex, size_t
   return vluxei32_v_f32m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_f32m8(
+// CHECK-RV64-LABEL: @test_vluxei32_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i32.i64(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1716,7 +1716,7 @@ vfloat32m8_t test_vluxei32_v_f32m8(const float *base, vuint32m8_t bindex, size_t
   return vluxei32_v_f32m8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vluxei64_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i64.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1726,7 +1726,7 @@ vfloat32mf2_t test_vluxei64_v_f32mf2(const float *base, vuint64m1_t bindex, size
   return vluxei64_v_f32mf2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_f32m1(
+// CHECK-RV64-LABEL: @test_vluxei64_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i64.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1736,7 +1736,7 @@ vfloat32m1_t test_vluxei64_v_f32m1(const float *base, vuint64m2_t bindex, size_t
   return vluxei64_v_f32m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_f32m2(
+// CHECK-RV64-LABEL: @test_vluxei64_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i64.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1746,7 +1746,7 @@ vfloat32m2_t test_vluxei64_v_f32m2(const float *base, vuint64m4_t bindex, size_t
   return vluxei64_v_f32m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_f32m4(
+// CHECK-RV64-LABEL: @test_vluxei64_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i64.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1756,7 +1756,7 @@ vfloat32m4_t test_vluxei64_v_f32m4(const float *base, vuint64m8_t bindex, size_t
   return vluxei64_v_f32m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_f64m1(
+// CHECK-RV64-LABEL: @test_vluxei8_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i8.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1766,7 +1766,7 @@ vfloat64m1_t test_vluxei8_v_f64m1(const double *base, vuint8mf8_t bindex, size_t
   return vluxei8_v_f64m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_f64m2(
+// CHECK-RV64-LABEL: @test_vluxei8_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i8.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1776,7 +1776,7 @@ vfloat64m2_t test_vluxei8_v_f64m2(const double *base, vuint8mf4_t bindex, size_t
   return vluxei8_v_f64m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_f64m4(
+// CHECK-RV64-LABEL: @test_vluxei8_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i8.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1786,7 +1786,7 @@ vfloat64m4_t test_vluxei8_v_f64m4(const double *base, vuint8mf2_t bindex, size_t
   return vluxei8_v_f64m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_f64m8(
+// CHECK-RV64-LABEL: @test_vluxei8_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i8.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1796,7 +1796,7 @@ vfloat64m8_t test_vluxei8_v_f64m8(const double *base, vuint8m1_t bindex, size_t
   return vluxei8_v_f64m8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_f64m1(
+// CHECK-RV64-LABEL: @test_vluxei16_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i16.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1806,7 +1806,7 @@ vfloat64m1_t test_vluxei16_v_f64m1(const double *base, vuint16mf4_t bindex, size
   return vluxei16_v_f64m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_f64m2(
+// CHECK-RV64-LABEL: @test_vluxei16_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i16.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1816,7 +1816,7 @@ vfloat64m2_t test_vluxei16_v_f64m2(const double *base, vuint16mf2_t bindex, size
   return vluxei16_v_f64m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_f64m4(
+// CHECK-RV64-LABEL: @test_vluxei16_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i16.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1826,7 +1826,7 @@ vfloat64m4_t test_vluxei16_v_f64m4(const double *base, vuint16m1_t bindex, size_
   return vluxei16_v_f64m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_f64m8(
+// CHECK-RV64-LABEL: @test_vluxei16_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i16.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1836,7 +1836,7 @@ vfloat64m8_t test_vluxei16_v_f64m8(const double *base, vuint16m2_t bindex, size_
   return vluxei16_v_f64m8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_f64m1(
+// CHECK-RV64-LABEL: @test_vluxei32_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i32.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1846,7 +1846,7 @@ vfloat64m1_t test_vluxei32_v_f64m1(const double *base, vuint32mf2_t bindex, size
   return vluxei32_v_f64m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_f64m2(
+// CHECK-RV64-LABEL: @test_vluxei32_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i32.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1856,7 +1856,7 @@ vfloat64m2_t test_vluxei32_v_f64m2(const double *base, vuint32m1_t bindex, size_
   return vluxei32_v_f64m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_f64m4(
+// CHECK-RV64-LABEL: @test_vluxei32_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i32.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1866,7 +1866,7 @@ vfloat64m4_t test_vluxei32_v_f64m4(const double *base, vuint32m2_t bindex, size_
   return vluxei32_v_f64m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_f64m8(
+// CHECK-RV64-LABEL: @test_vluxei32_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i32.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1876,7 +1876,7 @@ vfloat64m8_t test_vluxei32_v_f64m8(const double *base, vuint32m4_t bindex, size_
   return vluxei32_v_f64m8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_f64m1(
+// CHECK-RV64-LABEL: @test_vluxei64_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i64.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1886,7 +1886,7 @@ vfloat64m1_t test_vluxei64_v_f64m1(const double *base, vuint64m1_t bindex, size_
   return vluxei64_v_f64m1(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_f64m2(
+// CHECK-RV64-LABEL: @test_vluxei64_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i64.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1896,7 +1896,7 @@ vfloat64m2_t test_vluxei64_v_f64m2(const double *base, vuint64m2_t bindex, size_
   return vluxei64_v_f64m2(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_f64m4(
+// CHECK-RV64-LABEL: @test_vluxei64_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i64.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1906,7 +1906,7 @@ vfloat64m4_t test_vluxei64_v_f64m4(const double *base, vuint64m4_t bindex, size_
   return vluxei64_v_f64m4(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_f64m8(
+// CHECK-RV64-LABEL: @test_vluxei64_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i64.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1916,7 +1916,7 @@ vfloat64m8_t test_vluxei64_v_f64m8(const double *base, vuint64m8_t bindex, size_
   return vluxei64_v_f64m8(base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1926,7 +1926,7 @@ vint8mf8_t test_vluxei8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const in
   return vluxei8_v_i8mf8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i8mf4_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1936,7 +1936,7 @@ vint8mf4_t test_vluxei8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const in
   return vluxei8_v_i8mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i8mf2_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1946,7 +1946,7 @@ vint8mf2_t test_vluxei8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const in
   return vluxei8_v_i8mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1956,7 +1956,7 @@ vint8m1_t test_vluxei8_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t
   return vluxei8_v_i8m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i8m2_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1966,7 +1966,7 @@ vint8m2_t test_vluxei8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t
   return vluxei8_v_i8m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i8m4_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1976,7 +1976,7 @@ vint8m4_t test_vluxei8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t
   return vluxei8_v_i8m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i8m8_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1986,7 +1986,7 @@ vint8m8_t test_vluxei8_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, const int8_t
   return vluxei8_v_i8m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1996,7 +1996,7 @@ vint8mf8_t test_vluxei16_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const i
   return vluxei16_v_i8mf8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i8mf4_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2006,7 +2006,7 @@ vint8mf4_t test_vluxei16_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const i
   return vluxei16_v_i8mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i8mf2_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2016,7 +2016,7 @@ vint8mf2_t test_vluxei16_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const i
   return vluxei16_v_i8mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2026,7 +2026,7 @@ vint8m1_t test_vluxei16_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_
   return vluxei16_v_i8m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i8m2_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2036,7 +2036,7 @@ vint8m2_t test_vluxei16_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_
   return vluxei16_v_i8m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i8m4_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2046,7 +2046,7 @@ vint8m4_t test_vluxei16_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_
   return vluxei16_v_i8m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2056,7 +2056,7 @@ vint8mf8_t test_vluxei32_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const i
   return vluxei32_v_i8mf8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i8mf4_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2066,7 +2066,7 @@ vint8mf4_t test_vluxei32_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const i
   return vluxei32_v_i8mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i8mf2_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2076,7 +2076,7 @@ vint8mf2_t test_vluxei32_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const i
   return vluxei32_v_i8mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2086,7 +2086,7 @@ vint8m1_t test_vluxei32_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_
   return vluxei32_v_i8m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i8m2_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2096,7 +2096,7 @@ vint8m2_t test_vluxei32_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_
   return vluxei32_v_i8m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2106,7 +2106,7 @@ vint8mf8_t test_vluxei64_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const i
   return vluxei64_v_i8mf8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i8mf4_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2116,7 +2116,7 @@ vint8mf4_t test_vluxei64_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const i
   return vluxei64_v_i8mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i8mf2_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2126,7 +2126,7 @@ vint8mf2_t test_vluxei64_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const i
   return vluxei64_v_i8mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2136,7 +2136,7 @@ vint8m1_t test_vluxei64_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_
   return vluxei64_v_i8m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2146,7 +2146,7 @@ vint16mf4_t test_vluxei8_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const
   return vluxei8_v_i16mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2156,7 +2156,7 @@ vint16mf2_t test_vluxei8_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const
   return vluxei8_v_i16mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2166,7 +2166,7 @@ vint16m1_t test_vluxei8_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const in
   return vluxei8_v_i16m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i16m2_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2176,7 +2176,7 @@ vint16m2_t test_vluxei8_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int
   return vluxei8_v_i16m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i16m4_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2186,7 +2186,7 @@ vint16m4_t test_vluxei8_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int
   return vluxei8_v_i16m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i16m8_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2196,7 +2196,7 @@ vint16m8_t test_vluxei8_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int
   return vluxei8_v_i16m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2206,7 +2206,7 @@ vint16mf4_t test_vluxei16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, cons
   return vluxei16_v_i16mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2216,7 +2216,7 @@ vint16mf2_t test_vluxei16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, cons
   return vluxei16_v_i16mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2226,7 +2226,7 @@ vint16m1_t test_vluxei16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const i
   return vluxei16_v_i16m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i16m2_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2236,7 +2236,7 @@ vint16m2_t test_vluxei16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const in
   return vluxei16_v_i16m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i16m4_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2246,7 +2246,7 @@ vint16m4_t test_vluxei16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const in
   return vluxei16_v_i16m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i16m8_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2256,7 +2256,7 @@ vint16m8_t test_vluxei16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const in
   return vluxei16_v_i16m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2266,7 +2266,7 @@ vint16mf4_t test_vluxei32_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, cons
   return vluxei32_v_i16mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2276,7 +2276,7 @@ vint16mf2_t test_vluxei32_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, cons
   return vluxei32_v_i16mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2286,7 +2286,7 @@ vint16m1_t test_vluxei32_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const i
   return vluxei32_v_i16m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i16m2_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2296,7 +2296,7 @@ vint16m2_t test_vluxei32_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const in
   return vluxei32_v_i16m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i16m4_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2306,7 +2306,7 @@ vint16m4_t test_vluxei32_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const in
   return vluxei32_v_i16m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2316,7 +2316,7 @@ vint16mf4_t test_vluxei64_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, cons
   return vluxei64_v_i16mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2326,7 +2326,7 @@ vint16mf2_t test_vluxei64_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, cons
   return vluxei64_v_i16mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2336,7 +2336,7 @@ vint16m1_t test_vluxei64_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const i
   return vluxei64_v_i16m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i16m2_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2346,7 +2346,7 @@ vint16m2_t test_vluxei64_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const in
   return vluxei64_v_i16m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2356,7 +2356,7 @@ vint32mf2_t test_vluxei8_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const
   return vluxei8_v_i32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2366,7 +2366,7 @@ vint32m1_t test_vluxei8_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const in
   return vluxei8_v_i32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i32m2_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2376,7 +2376,7 @@ vint32m2_t test_vluxei8_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const in
   return vluxei8_v_i32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i32m4_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2386,7 +2386,7 @@ vint32m4_t test_vluxei8_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int
   return vluxei8_v_i32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i32m8_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2396,7 +2396,7 @@ vint32m8_t test_vluxei8_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int
   return vluxei8_v_i32m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2406,7 +2406,7 @@ vint32mf2_t test_vluxei16_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, cons
   return vluxei16_v_i32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2416,7 +2416,7 @@ vint32m1_t test_vluxei16_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const i
   return vluxei16_v_i32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i32m2_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2426,7 +2426,7 @@ vint32m2_t test_vluxei16_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const i
   return vluxei16_v_i32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i32m4_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2436,7 +2436,7 @@ vint32m4_t test_vluxei16_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const in
   return vluxei16_v_i32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i32m8_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2446,7 +2446,7 @@ vint32m8_t test_vluxei16_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const in
   return vluxei16_v_i32m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2456,7 +2456,7 @@ vint32mf2_t test_vluxei32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, cons
   return vluxei32_v_i32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2466,7 +2466,7 @@ vint32m1_t test_vluxei32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const i
   return vluxei32_v_i32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i32m2_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2476,7 +2476,7 @@ vint32m2_t test_vluxei32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const i
   return vluxei32_v_i32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i32m4_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2486,7 +2486,7 @@ vint32m4_t test_vluxei32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const in
   return vluxei32_v_i32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i32m8_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2496,7 +2496,7 @@ vint32m8_t test_vluxei32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const in
   return vluxei32_v_i32m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2506,7 +2506,7 @@ vint32mf2_t test_vluxei64_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, cons
   return vluxei64_v_i32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2516,7 +2516,7 @@ vint32m1_t test_vluxei64_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const i
   return vluxei64_v_i32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i32m2_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2526,7 +2526,7 @@ vint32m2_t test_vluxei64_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const i
   return vluxei64_v_i32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i32m4_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2536,7 +2536,7 @@ vint32m4_t test_vluxei64_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const in
   return vluxei64_v_i32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2546,7 +2546,7 @@ vint64m1_t test_vluxei8_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const in
   return vluxei8_v_i64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i64m2_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2556,7 +2556,7 @@ vint64m2_t test_vluxei8_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const in
   return vluxei8_v_i64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i64m4_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2566,7 +2566,7 @@ vint64m4_t test_vluxei8_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const in
   return vluxei8_v_i64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_i64m8_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2576,7 +2576,7 @@ vint64m8_t test_vluxei8_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int
   return vluxei8_v_i64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2586,7 +2586,7 @@ vint64m1_t test_vluxei16_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const i
   return vluxei16_v_i64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i64m2_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2596,7 +2596,7 @@ vint64m2_t test_vluxei16_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const i
   return vluxei16_v_i64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i64m4_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2606,7 +2606,7 @@ vint64m4_t test_vluxei16_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const i
   return vluxei16_v_i64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_i64m8_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2616,7 +2616,7 @@ vint64m8_t test_vluxei16_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const in
   return vluxei16_v_i64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2626,7 +2626,7 @@ vint64m1_t test_vluxei32_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const i
   return vluxei32_v_i64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i64m2_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2636,7 +2636,7 @@ vint64m2_t test_vluxei32_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const i
   return vluxei32_v_i64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i64m4_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2646,7 +2646,7 @@ vint64m4_t test_vluxei32_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const i
   return vluxei32_v_i64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_i64m8_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2656,7 +2656,7 @@ vint64m8_t test_vluxei32_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const in
   return vluxei32_v_i64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2666,7 +2666,7 @@ vint64m1_t test_vluxei64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const i
   return vluxei64_v_i64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i64m2_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2676,7 +2676,7 @@ vint64m2_t test_vluxei64_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const i
   return vluxei64_v_i64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i64m4_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2686,7 +2686,7 @@ vint64m4_t test_vluxei64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const i
   return vluxei64_v_i64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_i64m8_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2696,7 +2696,7 @@ vint64m8_t test_vluxei64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const in
   return vluxei64_v_i64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2706,7 +2706,7 @@ vuint8mf8_t test_vluxei8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const
   return vluxei8_v_u8mf8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2716,7 +2716,7 @@ vuint8mf4_t test_vluxei8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const
   return vluxei8_v_u8mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2726,7 +2726,7 @@ vuint8mf2_t test_vluxei8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const
   return vluxei8_v_u8mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2736,7 +2736,7 @@ vuint8m1_t test_vluxei8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint
   return vluxei8_v_u8m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u8m2_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2746,7 +2746,7 @@ vuint8m2_t test_vluxei8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint
   return vluxei8_v_u8m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u8m4_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2756,7 +2756,7 @@ vuint8m4_t test_vluxei8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint
   return vluxei8_v_u8m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u8m8_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2766,7 +2766,7 @@ vuint8m8_t test_vluxei8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, const uint
   return vluxei8_v_u8m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2776,7 +2776,7 @@ vuint8mf8_t test_vluxei16_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const
   return vluxei16_v_u8mf8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2786,7 +2786,7 @@ vuint8mf4_t test_vluxei16_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const
   return vluxei16_v_u8mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2796,7 +2796,7 @@ vuint8mf2_t test_vluxei16_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const
   return vluxei16_v_u8mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2806,7 +2806,7 @@ vuint8m1_t test_vluxei16_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uin
   return vluxei16_v_u8m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u8m2_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2816,7 +2816,7 @@ vuint8m2_t test_vluxei16_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uin
   return vluxei16_v_u8m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u8m4_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2826,7 +2826,7 @@ vuint8m4_t test_vluxei16_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uin
   return vluxei16_v_u8m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2836,7 +2836,7 @@ vuint8mf8_t test_vluxei32_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const
   return vluxei32_v_u8mf8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2846,7 +2846,7 @@ vuint8mf4_t test_vluxei32_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const
   return vluxei32_v_u8mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2856,7 +2856,7 @@ vuint8mf2_t test_vluxei32_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const
   return vluxei32_v_u8mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2866,7 +2866,7 @@ vuint8m1_t test_vluxei32_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uin
   return vluxei32_v_u8m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u8m2_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2876,7 +2876,7 @@ vuint8m2_t test_vluxei32_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uin
   return vluxei32_v_u8m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2886,7 +2886,7 @@ vuint8mf8_t test_vluxei64_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const
   return vluxei64_v_u8mf8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2896,7 +2896,7 @@ vuint8mf4_t test_vluxei64_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const
   return vluxei64_v_u8mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2906,7 +2906,7 @@ vuint8mf2_t test_vluxei64_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const
   return vluxei64_v_u8mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2916,7 +2916,7 @@ vuint8m1_t test_vluxei64_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uin
   return vluxei64_v_u8m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u16mf4_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2926,7 +2926,7 @@ vuint16mf4_t test_vluxei8_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, con
   return vluxei8_v_u16mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2936,7 +2936,7 @@ vuint16mf2_t test_vluxei8_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, con
   return vluxei8_v_u16mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2946,7 +2946,7 @@ vuint16m1_t test_vluxei8_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const
   return vluxei8_v_u16m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u16m2_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2956,7 +2956,7 @@ vuint16m2_t test_vluxei8_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const u
   return vluxei8_v_u16m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u16m4_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2966,7 +2966,7 @@ vuint16m4_t test_vluxei8_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const u
   return vluxei8_v_u16m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u16m8_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2976,7 +2976,7 @@ vuint16m8_t test_vluxei8_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const u
   return vluxei8_v_u16m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u16mf4_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2986,7 +2986,7 @@ vuint16mf4_t test_vluxei16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, co
   return vluxei16_v_u16mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2996,7 +2996,7 @@ vuint16mf2_t test_vluxei16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, co
   return vluxei16_v_u16mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3006,7 +3006,7 @@ vuint16m1_t test_vluxei16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const
   return vluxei16_v_u16m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u16m2_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3016,7 +3016,7 @@ vuint16m2_t test_vluxei16_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const
   return vluxei16_v_u16m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u16m4_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3026,7 +3026,7 @@ vuint16m4_t test_vluxei16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const
   return vluxei16_v_u16m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u16m8_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3036,7 +3036,7 @@ vuint16m8_t test_vluxei16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const
   return vluxei16_v_u16m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u16mf4_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3046,7 +3046,7 @@ vuint16mf4_t test_vluxei32_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, co
   return vluxei32_v_u16mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3056,7 +3056,7 @@ vuint16mf2_t test_vluxei32_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, co
   return vluxei32_v_u16mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3066,7 +3066,7 @@ vuint16m1_t test_vluxei32_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const
   return vluxei32_v_u16m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u16m2_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3076,7 +3076,7 @@ vuint16m2_t test_vluxei32_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const
   return vluxei32_v_u16m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u16m4_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3086,7 +3086,7 @@ vuint16m4_t test_vluxei32_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const
   return vluxei32_v_u16m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u16mf4_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3096,7 +3096,7 @@ vuint16mf4_t test_vluxei64_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, co
   return vluxei64_v_u16mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3106,7 +3106,7 @@ vuint16mf2_t test_vluxei64_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, co
   return vluxei64_v_u16mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3116,7 +3116,7 @@ vuint16m1_t test_vluxei64_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const
   return vluxei64_v_u16m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u16m2_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3126,7 +3126,7 @@ vuint16m2_t test_vluxei64_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const
   return vluxei64_v_u16m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3136,7 +3136,7 @@ vuint32mf2_t test_vluxei8_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, con
   return vluxei8_v_u32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3146,7 +3146,7 @@ vuint32m1_t test_vluxei8_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const
   return vluxei8_v_u32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u32m2_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3156,7 +3156,7 @@ vuint32m2_t test_vluxei8_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const
   return vluxei8_v_u32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u32m4_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3166,7 +3166,7 @@ vuint32m4_t test_vluxei8_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const u
   return vluxei8_v_u32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u32m8_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3176,7 +3176,7 @@ vuint32m8_t test_vluxei8_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const u
   return vluxei8_v_u32m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3186,7 +3186,7 @@ vuint32mf2_t test_vluxei16_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, co
   return vluxei16_v_u32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3196,7 +3196,7 @@ vuint32m1_t test_vluxei16_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const
   return vluxei16_v_u32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u32m2_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3206,7 +3206,7 @@ vuint32m2_t test_vluxei16_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const
   return vluxei16_v_u32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u32m4_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3216,7 +3216,7 @@ vuint32m4_t test_vluxei16_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const
   return vluxei16_v_u32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u32m8_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3226,7 +3226,7 @@ vuint32m8_t test_vluxei16_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const
   return vluxei16_v_u32m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3236,7 +3236,7 @@ vuint32mf2_t test_vluxei32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, co
   return vluxei32_v_u32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3246,7 +3246,7 @@ vuint32m1_t test_vluxei32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const
   return vluxei32_v_u32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u32m2_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3256,7 +3256,7 @@ vuint32m2_t test_vluxei32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const
   return vluxei32_v_u32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u32m4_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3266,7 +3266,7 @@ vuint32m4_t test_vluxei32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const
   return vluxei32_v_u32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u32m8_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3276,7 +3276,7 @@ vuint32m8_t test_vluxei32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const
   return vluxei32_v_u32m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3286,7 +3286,7 @@ vuint32mf2_t test_vluxei64_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, co
   return vluxei64_v_u32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3296,7 +3296,7 @@ vuint32m1_t test_vluxei64_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const
   return vluxei64_v_u32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u32m2_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3306,7 +3306,7 @@ vuint32m2_t test_vluxei64_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const
   return vluxei64_v_u32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u32m4_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3316,7 +3316,7 @@ vuint32m4_t test_vluxei64_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const
   return vluxei64_v_u32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3326,7 +3326,7 @@ vuint64m1_t test_vluxei8_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const
   return vluxei8_v_u64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u64m2_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3336,7 +3336,7 @@ vuint64m2_t test_vluxei8_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const
   return vluxei8_v_u64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u64m4_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3346,7 +3346,7 @@ vuint64m4_t test_vluxei8_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const
   return vluxei8_v_u64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_u64m8_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3356,7 +3356,7 @@ vuint64m8_t test_vluxei8_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const u
   return vluxei8_v_u64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3366,7 +3366,7 @@ vuint64m1_t test_vluxei16_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const
   return vluxei16_v_u64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u64m2_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3376,7 +3376,7 @@ vuint64m2_t test_vluxei16_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const
   return vluxei16_v_u64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u64m4_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3386,7 +3386,7 @@ vuint64m4_t test_vluxei16_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const
   return vluxei16_v_u64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_u64m8_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3396,7 +3396,7 @@ vuint64m8_t test_vluxei16_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const
   return vluxei16_v_u64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3406,7 +3406,7 @@ vuint64m1_t test_vluxei32_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const
   return vluxei32_v_u64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u64m2_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3416,7 +3416,7 @@ vuint64m2_t test_vluxei32_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const
   return vluxei32_v_u64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u64m4_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3426,7 +3426,7 @@ vuint64m4_t test_vluxei32_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const
   return vluxei32_v_u64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_u64m8_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3436,7 +3436,7 @@ vuint64m8_t test_vluxei32_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const
   return vluxei32_v_u64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3446,7 +3446,7 @@ vuint64m1_t test_vluxei64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const
   return vluxei64_v_u64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u64m2_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3456,7 +3456,7 @@ vuint64m2_t test_vluxei64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const
   return vluxei64_v_u64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u64m4_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3466,7 +3466,7 @@ vuint64m4_t test_vluxei64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const
   return vluxei64_v_u64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_u64m8_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3476,7 +3476,7 @@ vuint64m8_t test_vluxei64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const
   return vluxei64_v_u64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i8.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3486,7 +3486,7 @@ vfloat32mf2_t test_vluxei8_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, c
   return vluxei8_v_f32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i8.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3496,7 +3496,7 @@ vfloat32m1_t test_vluxei8_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, cons
   return vluxei8_v_f32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i8.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3506,7 +3506,7 @@ vfloat32m2_t test_vluxei8_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, cons
   return vluxei8_v_f32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_f32m4_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i8.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3516,7 +3516,7 @@ vfloat32m4_t test_vluxei8_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const
   return vluxei8_v_f32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_f32m8_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i8.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3526,7 +3526,7 @@ vfloat32m8_t test_vluxei8_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const
   return vluxei8_v_f32m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i16.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3536,7 +3536,7 @@ vfloat32mf2_t test_vluxei16_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
   return vluxei16_v_f32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i16.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3546,7 +3546,7 @@ vfloat32m1_t test_vluxei16_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, con
   return vluxei16_v_f32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i16.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3556,7 +3556,7 @@ vfloat32m2_t test_vluxei16_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, con
   return vluxei16_v_f32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_f32m4_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i16.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3566,7 +3566,7 @@ vfloat32m4_t test_vluxei16_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, cons
   return vluxei16_v_f32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_f32m8_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i16.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3576,7 +3576,7 @@ vfloat32m8_t test_vluxei16_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, cons
   return vluxei16_v_f32m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3586,7 +3586,7 @@ vfloat32mf2_t test_vluxei32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
   return vluxei32_v_f32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3596,7 +3596,7 @@ vfloat32m1_t test_vluxei32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, con
   return vluxei32_v_f32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3606,7 +3606,7 @@ vfloat32m2_t test_vluxei32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, con
   return vluxei32_v_f32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_f32m4_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3616,7 +3616,7 @@ vfloat32m4_t test_vluxei32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, cons
   return vluxei32_v_f32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_f32m8_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3626,7 +3626,7 @@ vfloat32m8_t test_vluxei32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, cons
   return vluxei32_v_f32m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i64.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3636,7 +3636,7 @@ vfloat32mf2_t test_vluxei64_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
   return vluxei64_v_f32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i64.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3646,7 +3646,7 @@ vfloat32m1_t test_vluxei64_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, con
   return vluxei64_v_f32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i64.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3656,7 +3656,7 @@ vfloat32m2_t test_vluxei64_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, con
   return vluxei64_v_f32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_f32m4_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i64.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3666,7 +3666,7 @@ vfloat32m4_t test_vluxei64_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, cons
   return vluxei64_v_f32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i8.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3676,7 +3676,7 @@ vfloat64m1_t test_vluxei8_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, cons
   return vluxei8_v_f64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_f64m2_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i8.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3686,7 +3686,7 @@ vfloat64m2_t test_vluxei8_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, cons
   return vluxei8_v_f64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_f64m4_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i8.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3696,7 +3696,7 @@ vfloat64m4_t test_vluxei8_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, cons
   return vluxei8_v_f64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei8_v_f64m8_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i8.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3706,7 +3706,7 @@ vfloat64m8_t test_vluxei8_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const
   return vluxei8_v_f64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i16.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3716,7 +3716,7 @@ vfloat64m1_t test_vluxei16_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, con
   return vluxei16_v_f64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_f64m2_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i16.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3726,7 +3726,7 @@ vfloat64m2_t test_vluxei16_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, con
   return vluxei16_v_f64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_f64m4_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i16.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3736,7 +3736,7 @@ vfloat64m4_t test_vluxei16_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, con
   return vluxei16_v_f64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei16_v_f64m8_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i16.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3746,7 +3746,7 @@ vfloat64m8_t test_vluxei16_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, cons
   return vluxei16_v_f64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3756,7 +3756,7 @@ vfloat64m1_t test_vluxei32_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, con
   return vluxei32_v_f64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_f64m2_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i32.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3766,7 +3766,7 @@ vfloat64m2_t test_vluxei32_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, con
   return vluxei32_v_f64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_f64m4_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i32.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3776,7 +3776,7 @@ vfloat64m4_t test_vluxei32_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, con
   return vluxei32_v_f64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei32_v_f64m8_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3786,7 +3786,7 @@ vfloat64m8_t test_vluxei32_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, cons
   return vluxei32_v_f64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3796,7 +3796,7 @@ vfloat64m1_t test_vluxei64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, con
   return vluxei64_v_f64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_f64m2_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3806,7 +3806,7 @@ vfloat64m2_t test_vluxei64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, con
   return vluxei64_v_f64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_f64m4_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -3816,7 +3816,7 @@ vfloat64m4_t test_vluxei64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, con
   return vluxei64_v_f64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @testuxei64_v_f64m8_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
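
The rename only touches the CHECK-RV64-LABEL lines so that they match the C test functions already present in the files; the test bodies themselves are unchanged. For reference, each masked case pairs such a label block with a small wrapper of the following shape (a sketch reconstructed from the hunk context above; the diff truncates the parameter lists, so the base/bindex parameter types shown here are assumptions):

// Sketch of one masked vluxei test case (full signature assumed; the hunk
// context above cuts the parameter list short).
vuint64m8_t test_vluxei64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
                                    const uint64_t *base, vuint64m8_t bindex,
                                    size_t vl) {
  // The CHECK-RV64 lines verify this lowers to
  // @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64.i64 with mask, maskedoff,
  // index vector, and vl passed through.
  return vluxei64_v_u64m8_m(mask, maskedoff, base, bindex, vl);
}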