[clang] 92717c2 - [2/7][Clang][RISCV] Remove default tail-undisturbed for vslideup intrinsics

via cfe-commits cfe-commits at lists.llvm.org
Mon Jan 23 23:35:02 PST 2023


Author: eopXD
Date: 2023-01-23T23:34:57-08:00
New Revision: 92717c2ca8c53e51e99fd0a4adea8b56008bab53

URL: https://github.com/llvm/llvm-project/commit/92717c2ca8c53e51e99fd0a4adea8b56008bab53
DIFF: https://github.com/llvm/llvm-project/commit/92717c2ca8c53e51e99fd0a4adea8b56008bab53.diff

LOG: [2/7][Clang][RISCV] Remove default tail-undisturbed for vslideup intrinsics

The destination parameter is still needed, since elements
0 < i < max(vstart, OFFSET) of vd remain unchanged.

The default policy for non-policy (implicit) vslideup intrinsics
will be tail agnostic and mask undisturbed.
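
As a minimal sketch of the user-visible effect (assuming the non-policy,
non-overloaded intrinsic naming used in the tests below):

  vint32m1_t foo(vint32m1_t dest, vint32m1_t src, size_t offset, size_t vl) {
    // Elements 0 < i < max(vstart, offset) keep their value from dest.
    // With this change the call lowers to @llvm.riscv.vslideup with policy
    // operand 1 (tail agnostic, mask undisturbed) instead of 0 (tail
    // undisturbed), as reflected in the updated CHECK lines.
    return vslideup_vx_i32m1(dest, src, offset, vl);
  }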

This is the 2nd commit of a patch-set that aims to remove the
IsPrototypeDefaultTU special case for the rvv-intrinsics.

Please refer to the cover letter in the 1st commit (D140895) for an overview.

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D140936

Added: 
    

Modified: 
    clang/include/clang/Basic/riscv_vector.td
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vslideup.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vslideup.c

Removed: 
    


################################################################################
diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
index 3bd463324db6b..623cace677c02 100644
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -457,10 +457,21 @@ class RVVMaskOp0Builtin<string prototype> : RVVOp0Builtin<"m", prototype, "c"> {
   let HasMaskedOffOperand = false;
 }
 
+let UnMaskedPolicyScheme = HasPolicyOperand,
+    HasMaskedOffOperand = false,
+    IsPrototypeDefaultTU = false in {
+  multiclass RVVSlideUpBuiltinSet {
+    defm "" : RVVOutBuiltinSet<NAME, "csilxfd",
+                               [["vx","v", "vvvz"]]>;
+    defm "" : RVVOutBuiltinSet<NAME, "csil",
+                               [["vx","Uv", "UvUvUvz"]]>;
+  }
+}
+
 let UnMaskedPolicyScheme = HasPolicyOperand,
     HasMaskedOffOperand = false,
     IsPrototypeDefaultTU = true in {
-  multiclass RVVSlideBuiltinSet {
+  multiclass RVVSlideDownBuiltinSet {
     defm "" : RVVOutBuiltinSet<NAME, "csilxfd",
                                [["vx","v", "vvvz"]]>;
     defm "" : RVVOutBuiltinSet<NAME, "csil",
@@ -2190,9 +2201,9 @@ let HasMasked = false, MaskedPolicyScheme = NonePolicy in {
 
 // 17.3. Vector Slide Instructions
 // 17.3.1. Vector Slideup Instructions
-defm vslideup   : RVVSlideBuiltinSet;
+defm vslideup   : RVVSlideUpBuiltinSet;
 // 17.3.2. Vector Slidedown Instructions
-defm vslidedown : RVVSlideBuiltinSet;
+defm vslidedown : RVVSlideDownBuiltinSet;
 
 // 17.3.3. Vector Slide1up Instructions
 let UnMaskedPolicyScheme = HasPassthruOperand in {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vslideup.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vslideup.c
index 61cc2cce24c3c..813d9ecfe47ae 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vslideup.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vslideup.c
@@ -9,7 +9,7 @@
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vslideup.nxv1f16.i64(<vscale x 1 x half> [[DEST:%.*]], <vscale x 1 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vslideup.nxv1f16.i64(<vscale x 1 x half> [[DEST:%.*]], <vscale x 1 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vslideup_vx_f16mf4(vfloat16mf4_t dest, vfloat16mf4_t src, size_t offset, size_t vl) {
@@ -18,7 +18,7 @@ vfloat16mf4_t test_vslideup_vx_f16mf4(vfloat16mf4_t dest, vfloat16mf4_t src, siz
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vslideup.nxv2f16.i64(<vscale x 2 x half> [[DEST:%.*]], <vscale x 2 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vslideup.nxv2f16.i64(<vscale x 2 x half> [[DEST:%.*]], <vscale x 2 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vslideup_vx_f16mf2(vfloat16mf2_t dest, vfloat16mf2_t src, size_t offset, size_t vl) {
@@ -27,7 +27,7 @@ vfloat16mf2_t test_vslideup_vx_f16mf2(vfloat16mf2_t dest, vfloat16mf2_t src, siz
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vslideup.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vslideup.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vslideup_vx_f16m1(vfloat16m1_t dest, vfloat16m1_t src, size_t offset, size_t vl) {
@@ -36,7 +36,7 @@ vfloat16m1_t test_vslideup_vx_f16m1(vfloat16m1_t dest, vfloat16m1_t src, size_t
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vslideup.nxv8f16.i64(<vscale x 8 x half> [[DEST:%.*]], <vscale x 8 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vslideup.nxv8f16.i64(<vscale x 8 x half> [[DEST:%.*]], <vscale x 8 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vslideup_vx_f16m2(vfloat16m2_t dest, vfloat16m2_t src, size_t offset, size_t vl) {
@@ -45,7 +45,7 @@ vfloat16m2_t test_vslideup_vx_f16m2(vfloat16m2_t dest, vfloat16m2_t src, size_t
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vslideup.nxv16f16.i64(<vscale x 16 x half> [[DEST:%.*]], <vscale x 16 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vslideup.nxv16f16.i64(<vscale x 16 x half> [[DEST:%.*]], <vscale x 16 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vslideup_vx_f16m4(vfloat16m4_t dest, vfloat16m4_t src, size_t offset, size_t vl) {
@@ -54,7 +54,7 @@ vfloat16m4_t test_vslideup_vx_f16m4(vfloat16m4_t dest, vfloat16m4_t src, size_t
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vslideup.nxv32f16.i64(<vscale x 32 x half> [[DEST:%.*]], <vscale x 32 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vslideup.nxv32f16.i64(<vscale x 32 x half> [[DEST:%.*]], <vscale x 32 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vslideup_vx_f16m8(vfloat16m8_t dest, vfloat16m8_t src, size_t offset, size_t vl) {
@@ -63,7 +63,7 @@ vfloat16m8_t test_vslideup_vx_f16m8(vfloat16m8_t dest, vfloat16m8_t src, size_t
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslideup.nxv1f32.i64(<vscale x 1 x float> [[DEST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslideup.nxv1f32.i64(<vscale x 1 x float> [[DEST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vslideup_vx_f32mf2(vfloat32mf2_t dest, vfloat32mf2_t src, size_t offset, size_t vl) {
@@ -72,7 +72,7 @@ vfloat32mf2_t test_vslideup_vx_f32mf2(vfloat32mf2_t dest, vfloat32mf2_t src, siz
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslideup.nxv2f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslideup.nxv2f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vslideup_vx_f32m1(vfloat32m1_t dest, vfloat32m1_t src, size_t offset, size_t vl) {
@@ -81,7 +81,7 @@ vfloat32m1_t test_vslideup_vx_f32m1(vfloat32m1_t dest, vfloat32m1_t src, size_t
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslideup.nxv4f32.i64(<vscale x 4 x float> [[DEST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslideup.nxv4f32.i64(<vscale x 4 x float> [[DEST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vslideup_vx_f32m2(vfloat32m2_t dest, vfloat32m2_t src, size_t offset, size_t vl) {
@@ -90,7 +90,7 @@ vfloat32m2_t test_vslideup_vx_f32m2(vfloat32m2_t dest, vfloat32m2_t src, size_t
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslideup.nxv8f32.i64(<vscale x 8 x float> [[DEST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslideup.nxv8f32.i64(<vscale x 8 x float> [[DEST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vslideup_vx_f32m4(vfloat32m4_t dest, vfloat32m4_t src, size_t offset, size_t vl) {
@@ -99,7 +99,7 @@ vfloat32m4_t test_vslideup_vx_f32m4(vfloat32m4_t dest, vfloat32m4_t src, size_t
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslideup.nxv16f32.i64(<vscale x 16 x float> [[DEST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslideup.nxv16f32.i64(<vscale x 16 x float> [[DEST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vslideup_vx_f32m8(vfloat32m8_t dest, vfloat32m8_t src, size_t offset, size_t vl) {
@@ -108,7 +108,7 @@ vfloat32m8_t test_vslideup_vx_f32m8(vfloat32m8_t dest, vfloat32m8_t src, size_t
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslideup.nxv1f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslideup.nxv1f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vslideup_vx_f64m1(vfloat64m1_t dest, vfloat64m1_t src, size_t offset, size_t vl) {
@@ -117,7 +117,7 @@ vfloat64m1_t test_vslideup_vx_f64m1(vfloat64m1_t dest, vfloat64m1_t src, size_t
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslideup.nxv2f64.i64(<vscale x 2 x double> [[DEST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslideup.nxv2f64.i64(<vscale x 2 x double> [[DEST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vslideup_vx_f64m2(vfloat64m2_t dest, vfloat64m2_t src, size_t offset, size_t vl) {
@@ -126,7 +126,7 @@ vfloat64m2_t test_vslideup_vx_f64m2(vfloat64m2_t dest, vfloat64m2_t src, size_t
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslideup.nxv4f64.i64(<vscale x 4 x double> [[DEST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslideup.nxv4f64.i64(<vscale x 4 x double> [[DEST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vslideup_vx_f64m4(vfloat64m4_t dest, vfloat64m4_t src, size_t offset, size_t vl) {
@@ -135,7 +135,7 @@ vfloat64m4_t test_vslideup_vx_f64m4(vfloat64m4_t dest, vfloat64m4_t src, size_t
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslideup.nxv8f64.i64(<vscale x 8 x double> [[DEST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslideup.nxv8f64.i64(<vscale x 8 x double> [[DEST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vslideup_vx_f64m8(vfloat64m8_t dest, vfloat64m8_t src, size_t offset, size_t vl) {
@@ -144,7 +144,7 @@ vfloat64m8_t test_vslideup_vx_f64m8(vfloat64m8_t dest, vfloat64m8_t src, size_t
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8.i64(<vscale x 1 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8.i64(<vscale x 1 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vslideup_vx_i8mf8(vint8mf8_t dest, vint8mf8_t src, size_t offset, size_t vl) {
@@ -153,7 +153,7 @@ vint8mf8_t test_vslideup_vx_i8mf8(vint8mf8_t dest, vint8mf8_t src, size_t offset
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8.i64(<vscale x 2 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8.i64(<vscale x 2 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vslideup_vx_i8mf4(vint8mf4_t dest, vint8mf4_t src, size_t offset, size_t vl) {
@@ -162,7 +162,7 @@ vint8mf4_t test_vslideup_vx_i8mf4(vint8mf4_t dest, vint8mf4_t src, size_t offset
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8.i64(<vscale x 4 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8.i64(<vscale x 4 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vslideup_vx_i8mf2(vint8mf2_t dest, vint8mf2_t src, size_t offset, size_t vl) {
@@ -171,7 +171,7 @@ vint8mf2_t test_vslideup_vx_i8mf2(vint8mf2_t dest, vint8mf2_t src, size_t offset
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vslideup_vx_i8m1(vint8m1_t dest, vint8m1_t src, size_t offset, size_t vl) {
@@ -180,7 +180,7 @@ vint8m1_t test_vslideup_vx_i8m1(vint8m1_t dest, vint8m1_t src, size_t offset, si
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8.i64(<vscale x 16 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8.i64(<vscale x 16 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vslideup_vx_i8m2(vint8m2_t dest, vint8m2_t src, size_t offset, size_t vl) {
@@ -189,7 +189,7 @@ vint8m2_t test_vslideup_vx_i8m2(vint8m2_t dest, vint8m2_t src, size_t offset, si
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8.i64(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8.i64(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vslideup_vx_i8m4(vint8m4_t dest, vint8m4_t src, size_t offset, size_t vl) {
@@ -198,7 +198,7 @@ vint8m4_t test_vslideup_vx_i8m4(vint8m4_t dest, vint8m4_t src, size_t offset, si
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.nxv64i8.i64(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.nxv64i8.i64(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vslideup_vx_i8m8(vint8m8_t dest, vint8m8_t src, size_t offset, size_t vl) {
@@ -207,7 +207,7 @@ vint8m8_t test_vslideup_vx_i8m8(vint8m8_t dest, vint8m8_t src, size_t offset, si
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16.i64(<vscale x 1 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16.i64(<vscale x 1 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vslideup_vx_i16mf4(vint16mf4_t dest, vint16mf4_t src, size_t offset, size_t vl) {
@@ -216,7 +216,7 @@ vint16mf4_t test_vslideup_vx_i16mf4(vint16mf4_t dest, vint16mf4_t src, size_t of
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16.i64(<vscale x 2 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16.i64(<vscale x 2 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vslideup_vx_i16mf2(vint16mf2_t dest, vint16mf2_t src, size_t offset, size_t vl) {
@@ -225,7 +225,7 @@ vint16mf2_t test_vslideup_vx_i16mf2(vint16mf2_t dest, vint16mf2_t src, size_t of
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vslideup_vx_i16m1(vint16m1_t dest, vint16m1_t src, size_t offset, size_t vl) {
@@ -234,7 +234,7 @@ vint16m1_t test_vslideup_vx_i16m1(vint16m1_t dest, vint16m1_t src, size_t offset
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16.i64(<vscale x 8 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16.i64(<vscale x 8 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vslideup_vx_i16m2(vint16m2_t dest, vint16m2_t src, size_t offset, size_t vl) {
@@ -243,7 +243,7 @@ vint16m2_t test_vslideup_vx_i16m2(vint16m2_t dest, vint16m2_t src, size_t offset
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16.i64(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16.i64(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vslideup_vx_i16m4(vint16m4_t dest, vint16m4_t src, size_t offset, size_t vl) {
@@ -252,7 +252,7 @@ vint16m4_t test_vslideup_vx_i16m4(vint16m4_t dest, vint16m4_t src, size_t offset
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.nxv32i16.i64(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.nxv32i16.i64(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vslideup_vx_i16m8(vint16m8_t dest, vint16m8_t src, size_t offset, size_t vl) {
@@ -261,7 +261,7 @@ vint16m8_t test_vslideup_vx_i16m8(vint16m8_t dest, vint16m8_t src, size_t offset
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32.i64(<vscale x 1 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32.i64(<vscale x 1 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vslideup_vx_i32mf2(vint32mf2_t dest, vint32mf2_t src, size_t offset, size_t vl) {
@@ -270,7 +270,7 @@ vint32mf2_t test_vslideup_vx_i32mf2(vint32mf2_t dest, vint32mf2_t src, size_t of
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vslideup_vx_i32m1(vint32m1_t dest, vint32m1_t src, size_t offset, size_t vl) {
@@ -279,7 +279,7 @@ vint32m1_t test_vslideup_vx_i32m1(vint32m1_t dest, vint32m1_t src, size_t offset
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32.i64(<vscale x 4 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32.i64(<vscale x 4 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vslideup_vx_i32m2(vint32m2_t dest, vint32m2_t src, size_t offset, size_t vl) {
@@ -288,7 +288,7 @@ vint32m2_t test_vslideup_vx_i32m2(vint32m2_t dest, vint32m2_t src, size_t offset
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32.i64(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32.i64(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vslideup_vx_i32m4(vint32m4_t dest, vint32m4_t src, size_t offset, size_t vl) {
@@ -297,7 +297,7 @@ vint32m4_t test_vslideup_vx_i32m4(vint32m4_t dest, vint32m4_t src, size_t offset
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.nxv16i32.i64(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.nxv16i32.i64(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vslideup_vx_i32m8(vint32m8_t dest, vint32m8_t src, size_t offset, size_t vl) {
@@ -306,7 +306,7 @@ vint32m8_t test_vslideup_vx_i32m8(vint32m8_t dest, vint32m8_t src, size_t offset
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vslideup_vx_i64m1(vint64m1_t dest, vint64m1_t src, size_t offset, size_t vl) {
@@ -315,7 +315,7 @@ vint64m1_t test_vslideup_vx_i64m1(vint64m1_t dest, vint64m1_t src, size_t offset
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64.i64(<vscale x 2 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64.i64(<vscale x 2 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vslideup_vx_i64m2(vint64m2_t dest, vint64m2_t src, size_t offset, size_t vl) {
@@ -324,7 +324,7 @@ vint64m2_t test_vslideup_vx_i64m2(vint64m2_t dest, vint64m2_t src, size_t offset
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64.i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64.i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vslideup_vx_i64m4(vint64m4_t dest, vint64m4_t src, size_t offset, size_t vl) {
@@ -333,7 +333,7 @@ vint64m4_t test_vslideup_vx_i64m4(vint64m4_t dest, vint64m4_t src, size_t offset
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.nxv8i64.i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.nxv8i64.i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vslideup_vx_i64m8(vint64m8_t dest, vint64m8_t src, size_t offset, size_t vl) {
@@ -342,7 +342,7 @@ vint64m8_t test_vslideup_vx_i64m8(vint64m8_t dest, vint64m8_t src, size_t offset
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8.i64(<vscale x 1 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8.i64(<vscale x 1 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vslideup_vx_u8mf8(vuint8mf8_t dest, vuint8mf8_t src, size_t offset, size_t vl) {
@@ -351,7 +351,7 @@ vuint8mf8_t test_vslideup_vx_u8mf8(vuint8mf8_t dest, vuint8mf8_t src, size_t off
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8.i64(<vscale x 2 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8.i64(<vscale x 2 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vslideup_vx_u8mf4(vuint8mf4_t dest, vuint8mf4_t src, size_t offset, size_t vl) {
@@ -360,7 +360,7 @@ vuint8mf4_t test_vslideup_vx_u8mf4(vuint8mf4_t dest, vuint8mf4_t src, size_t off
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8.i64(<vscale x 4 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8.i64(<vscale x 4 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vslideup_vx_u8mf2(vuint8mf2_t dest, vuint8mf2_t src, size_t offset, size_t vl) {
@@ -369,7 +369,7 @@ vuint8mf2_t test_vslideup_vx_u8mf2(vuint8mf2_t dest, vuint8mf2_t src, size_t off
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vslideup_vx_u8m1(vuint8m1_t dest, vuint8m1_t src, size_t offset, size_t vl) {
@@ -378,7 +378,7 @@ vuint8m1_t test_vslideup_vx_u8m1(vuint8m1_t dest, vuint8m1_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8.i64(<vscale x 16 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8.i64(<vscale x 16 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vslideup_vx_u8m2(vuint8m2_t dest, vuint8m2_t src, size_t offset, size_t vl) {
@@ -387,7 +387,7 @@ vuint8m2_t test_vslideup_vx_u8m2(vuint8m2_t dest, vuint8m2_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8.i64(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8.i64(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vslideup_vx_u8m4(vuint8m4_t dest, vuint8m4_t src, size_t offset, size_t vl) {
@@ -396,7 +396,7 @@ vuint8m4_t test_vslideup_vx_u8m4(vuint8m4_t dest, vuint8m4_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.nxv64i8.i64(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.nxv64i8.i64(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vslideup_vx_u8m8(vuint8m8_t dest, vuint8m8_t src, size_t offset, size_t vl) {
@@ -405,7 +405,7 @@ vuint8m8_t test_vslideup_vx_u8m8(vuint8m8_t dest, vuint8m8_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16.i64(<vscale x 1 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16.i64(<vscale x 1 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vslideup_vx_u16mf4(vuint16mf4_t dest, vuint16mf4_t src, size_t offset, size_t vl) {
@@ -414,7 +414,7 @@ vuint16mf4_t test_vslideup_vx_u16mf4(vuint16mf4_t dest, vuint16mf4_t src, size_t
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16.i64(<vscale x 2 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16.i64(<vscale x 2 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vslideup_vx_u16mf2(vuint16mf2_t dest, vuint16mf2_t src, size_t offset, size_t vl) {
@@ -423,7 +423,7 @@ vuint16mf2_t test_vslideup_vx_u16mf2(vuint16mf2_t dest, vuint16mf2_t src, size_t
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vslideup_vx_u16m1(vuint16m1_t dest, vuint16m1_t src, size_t offset, size_t vl) {
@@ -432,7 +432,7 @@ vuint16m1_t test_vslideup_vx_u16m1(vuint16m1_t dest, vuint16m1_t src, size_t off
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16.i64(<vscale x 8 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16.i64(<vscale x 8 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vslideup_vx_u16m2(vuint16m2_t dest, vuint16m2_t src, size_t offset, size_t vl) {
@@ -441,7 +441,7 @@ vuint16m2_t test_vslideup_vx_u16m2(vuint16m2_t dest, vuint16m2_t src, size_t off
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16.i64(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16.i64(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vslideup_vx_u16m4(vuint16m4_t dest, vuint16m4_t src, size_t offset, size_t vl) {
@@ -450,7 +450,7 @@ vuint16m4_t test_vslideup_vx_u16m4(vuint16m4_t dest, vuint16m4_t src, size_t off
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.nxv32i16.i64(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.nxv32i16.i64(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vslideup_vx_u16m8(vuint16m8_t dest, vuint16m8_t src, size_t offset, size_t vl) {
@@ -459,7 +459,7 @@ vuint16m8_t test_vslideup_vx_u16m8(vuint16m8_t dest, vuint16m8_t src, size_t off
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32.i64(<vscale x 1 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32.i64(<vscale x 1 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vslideup_vx_u32mf2(vuint32mf2_t dest, vuint32mf2_t src, size_t offset, size_t vl) {
@@ -468,7 +468,7 @@ vuint32mf2_t test_vslideup_vx_u32mf2(vuint32mf2_t dest, vuint32mf2_t src, size_t
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vslideup_vx_u32m1(vuint32m1_t dest, vuint32m1_t src, size_t offset, size_t vl) {
@@ -477,7 +477,7 @@ vuint32m1_t test_vslideup_vx_u32m1(vuint32m1_t dest, vuint32m1_t src, size_t off
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32.i64(<vscale x 4 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32.i64(<vscale x 4 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vslideup_vx_u32m2(vuint32m2_t dest, vuint32m2_t src, size_t offset, size_t vl) {
@@ -486,7 +486,7 @@ vuint32m2_t test_vslideup_vx_u32m2(vuint32m2_t dest, vuint32m2_t src, size_t off
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32.i64(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32.i64(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vslideup_vx_u32m4(vuint32m4_t dest, vuint32m4_t src, size_t offset, size_t vl) {
@@ -495,7 +495,7 @@ vuint32m4_t test_vslideup_vx_u32m4(vuint32m4_t dest, vuint32m4_t src, size_t off
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.nxv16i32.i64(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.nxv16i32.i64(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vslideup_vx_u32m8(vuint32m8_t dest, vuint32m8_t src, size_t offset, size_t vl) {
@@ -504,7 +504,7 @@ vuint32m8_t test_vslideup_vx_u32m8(vuint32m8_t dest, vuint32m8_t src, size_t off
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vslideup_vx_u64m1(vuint64m1_t dest, vuint64m1_t src, size_t offset, size_t vl) {
@@ -513,7 +513,7 @@ vuint64m1_t test_vslideup_vx_u64m1(vuint64m1_t dest, vuint64m1_t src, size_t off
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64.i64(<vscale x 2 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64.i64(<vscale x 2 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vslideup_vx_u64m2(vuint64m2_t dest, vuint64m2_t src, size_t offset, size_t vl) {
@@ -522,7 +522,7 @@ vuint64m2_t test_vslideup_vx_u64m2(vuint64m2_t dest, vuint64m2_t src, size_t off
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64.i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64.i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vslideup_vx_u64m4(vuint64m4_t dest, vuint64m4_t src, size_t offset, size_t vl) {
@@ -531,7 +531,7 @@ vuint64m4_t test_vslideup_vx_u64m4(vuint64m4_t dest, vuint64m4_t src, size_t off
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.nxv8i64.i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.nxv8i64.i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vslideup_vx_u64m8(vuint64m8_t dest, vuint64m8_t src, size_t offset, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vslideup.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vslideup.c
index 610e5b266af09..05472d4f782e8 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vslideup.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vslideup.c
@@ -9,7 +9,7 @@
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vslideup.nxv1f16.i64(<vscale x 1 x half> [[DEST:%.*]], <vscale x 1 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vslideup.nxv1f16.i64(<vscale x 1 x half> [[DEST:%.*]], <vscale x 1 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vslideup_vx_f16mf4(vfloat16mf4_t dest, vfloat16mf4_t src, size_t offset, size_t vl) {
@@ -18,7 +18,7 @@ vfloat16mf4_t test_vslideup_vx_f16mf4(vfloat16mf4_t dest, vfloat16mf4_t src, siz
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vslideup.nxv2f16.i64(<vscale x 2 x half> [[DEST:%.*]], <vscale x 2 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vslideup.nxv2f16.i64(<vscale x 2 x half> [[DEST:%.*]], <vscale x 2 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vslideup_vx_f16mf2(vfloat16mf2_t dest, vfloat16mf2_t src, size_t offset, size_t vl) {
@@ -27,7 +27,7 @@ vfloat16mf2_t test_vslideup_vx_f16mf2(vfloat16mf2_t dest, vfloat16mf2_t src, siz
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vslideup.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vslideup.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vslideup_vx_f16m1(vfloat16m1_t dest, vfloat16m1_t src, size_t offset, size_t vl) {
@@ -36,7 +36,7 @@ vfloat16m1_t test_vslideup_vx_f16m1(vfloat16m1_t dest, vfloat16m1_t src, size_t
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vslideup.nxv8f16.i64(<vscale x 8 x half> [[DEST:%.*]], <vscale x 8 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vslideup.nxv8f16.i64(<vscale x 8 x half> [[DEST:%.*]], <vscale x 8 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vslideup_vx_f16m2(vfloat16m2_t dest, vfloat16m2_t src, size_t offset, size_t vl) {
@@ -45,7 +45,7 @@ vfloat16m2_t test_vslideup_vx_f16m2(vfloat16m2_t dest, vfloat16m2_t src, size_t
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vslideup.nxv16f16.i64(<vscale x 16 x half> [[DEST:%.*]], <vscale x 16 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vslideup.nxv16f16.i64(<vscale x 16 x half> [[DEST:%.*]], <vscale x 16 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vslideup_vx_f16m4(vfloat16m4_t dest, vfloat16m4_t src, size_t offset, size_t vl) {
@@ -54,7 +54,7 @@ vfloat16m4_t test_vslideup_vx_f16m4(vfloat16m4_t dest, vfloat16m4_t src, size_t
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vslideup.nxv32f16.i64(<vscale x 32 x half> [[DEST:%.*]], <vscale x 32 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vslideup.nxv32f16.i64(<vscale x 32 x half> [[DEST:%.*]], <vscale x 32 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vslideup_vx_f16m8(vfloat16m8_t dest, vfloat16m8_t src, size_t offset, size_t vl) {
@@ -63,7 +63,7 @@ vfloat16m8_t test_vslideup_vx_f16m8(vfloat16m8_t dest, vfloat16m8_t src, size_t
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslideup.nxv1f32.i64(<vscale x 1 x float> [[DEST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslideup.nxv1f32.i64(<vscale x 1 x float> [[DEST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vslideup_vx_f32mf2(vfloat32mf2_t dest, vfloat32mf2_t src, size_t offset, size_t vl) {
@@ -72,7 +72,7 @@ vfloat32mf2_t test_vslideup_vx_f32mf2(vfloat32mf2_t dest, vfloat32mf2_t src, siz
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslideup.nxv2f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslideup.nxv2f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vslideup_vx_f32m1(vfloat32m1_t dest, vfloat32m1_t src, size_t offset, size_t vl) {
@@ -81,7 +81,7 @@ vfloat32m1_t test_vslideup_vx_f32m1(vfloat32m1_t dest, vfloat32m1_t src, size_t
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslideup.nxv4f32.i64(<vscale x 4 x float> [[DEST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslideup.nxv4f32.i64(<vscale x 4 x float> [[DEST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vslideup_vx_f32m2(vfloat32m2_t dest, vfloat32m2_t src, size_t offset, size_t vl) {
@@ -90,7 +90,7 @@ vfloat32m2_t test_vslideup_vx_f32m2(vfloat32m2_t dest, vfloat32m2_t src, size_t
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslideup.nxv8f32.i64(<vscale x 8 x float> [[DEST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslideup.nxv8f32.i64(<vscale x 8 x float> [[DEST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vslideup_vx_f32m4(vfloat32m4_t dest, vfloat32m4_t src, size_t offset, size_t vl) {
@@ -99,7 +99,7 @@ vfloat32m4_t test_vslideup_vx_f32m4(vfloat32m4_t dest, vfloat32m4_t src, size_t
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslideup.nxv16f32.i64(<vscale x 16 x float> [[DEST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslideup.nxv16f32.i64(<vscale x 16 x float> [[DEST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vslideup_vx_f32m8(vfloat32m8_t dest, vfloat32m8_t src, size_t offset, size_t vl) {
@@ -108,7 +108,7 @@ vfloat32m8_t test_vslideup_vx_f32m8(vfloat32m8_t dest, vfloat32m8_t src, size_t
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslideup.nxv1f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslideup.nxv1f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vslideup_vx_f64m1(vfloat64m1_t dest, vfloat64m1_t src, size_t offset, size_t vl) {
@@ -117,7 +117,7 @@ vfloat64m1_t test_vslideup_vx_f64m1(vfloat64m1_t dest, vfloat64m1_t src, size_t
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslideup.nxv2f64.i64(<vscale x 2 x double> [[DEST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslideup.nxv2f64.i64(<vscale x 2 x double> [[DEST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vslideup_vx_f64m2(vfloat64m2_t dest, vfloat64m2_t src, size_t offset, size_t vl) {
@@ -126,7 +126,7 @@ vfloat64m2_t test_vslideup_vx_f64m2(vfloat64m2_t dest, vfloat64m2_t src, size_t
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslideup.nxv4f64.i64(<vscale x 4 x double> [[DEST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslideup.nxv4f64.i64(<vscale x 4 x double> [[DEST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vslideup_vx_f64m4(vfloat64m4_t dest, vfloat64m4_t src, size_t offset, size_t vl) {
@@ -135,7 +135,7 @@ vfloat64m4_t test_vslideup_vx_f64m4(vfloat64m4_t dest, vfloat64m4_t src, size_t
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslideup.nxv8f64.i64(<vscale x 8 x double> [[DEST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslideup.nxv8f64.i64(<vscale x 8 x double> [[DEST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vslideup_vx_f64m8(vfloat64m8_t dest, vfloat64m8_t src, size_t offset, size_t vl) {
@@ -144,7 +144,7 @@ vfloat64m8_t test_vslideup_vx_f64m8(vfloat64m8_t dest, vfloat64m8_t src, size_t
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8.i64(<vscale x 1 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8.i64(<vscale x 1 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vslideup_vx_i8mf8(vint8mf8_t dest, vint8mf8_t src, size_t offset, size_t vl) {
@@ -153,7 +153,7 @@ vint8mf8_t test_vslideup_vx_i8mf8(vint8mf8_t dest, vint8mf8_t src, size_t offset
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8.i64(<vscale x 2 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8.i64(<vscale x 2 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vslideup_vx_i8mf4(vint8mf4_t dest, vint8mf4_t src, size_t offset, size_t vl) {
@@ -162,7 +162,7 @@ vint8mf4_t test_vslideup_vx_i8mf4(vint8mf4_t dest, vint8mf4_t src, size_t offset
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8.i64(<vscale x 4 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8.i64(<vscale x 4 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vslideup_vx_i8mf2(vint8mf2_t dest, vint8mf2_t src, size_t offset, size_t vl) {
@@ -171,7 +171,7 @@ vint8mf2_t test_vslideup_vx_i8mf2(vint8mf2_t dest, vint8mf2_t src, size_t offset
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vslideup_vx_i8m1(vint8m1_t dest, vint8m1_t src, size_t offset, size_t vl) {
@@ -180,7 +180,7 @@ vint8m1_t test_vslideup_vx_i8m1(vint8m1_t dest, vint8m1_t src, size_t offset, si
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8.i64(<vscale x 16 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8.i64(<vscale x 16 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vslideup_vx_i8m2(vint8m2_t dest, vint8m2_t src, size_t offset, size_t vl) {
@@ -189,7 +189,7 @@ vint8m2_t test_vslideup_vx_i8m2(vint8m2_t dest, vint8m2_t src, size_t offset, si
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8.i64(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8.i64(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vslideup_vx_i8m4(vint8m4_t dest, vint8m4_t src, size_t offset, size_t vl) {
@@ -198,7 +198,7 @@ vint8m4_t test_vslideup_vx_i8m4(vint8m4_t dest, vint8m4_t src, size_t offset, si
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.nxv64i8.i64(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.nxv64i8.i64(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vslideup_vx_i8m8(vint8m8_t dest, vint8m8_t src, size_t offset, size_t vl) {
@@ -207,7 +207,7 @@ vint8m8_t test_vslideup_vx_i8m8(vint8m8_t dest, vint8m8_t src, size_t offset, si
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16.i64(<vscale x 1 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16.i64(<vscale x 1 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vslideup_vx_i16mf4(vint16mf4_t dest, vint16mf4_t src, size_t offset, size_t vl) {
@@ -216,7 +216,7 @@ vint16mf4_t test_vslideup_vx_i16mf4(vint16mf4_t dest, vint16mf4_t src, size_t of
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16.i64(<vscale x 2 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16.i64(<vscale x 2 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vslideup_vx_i16mf2(vint16mf2_t dest, vint16mf2_t src, size_t offset, size_t vl) {
@@ -225,7 +225,7 @@ vint16mf2_t test_vslideup_vx_i16mf2(vint16mf2_t dest, vint16mf2_t src, size_t of
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vslideup_vx_i16m1(vint16m1_t dest, vint16m1_t src, size_t offset, size_t vl) {
@@ -234,7 +234,7 @@ vint16m1_t test_vslideup_vx_i16m1(vint16m1_t dest, vint16m1_t src, size_t offset
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16.i64(<vscale x 8 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16.i64(<vscale x 8 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vslideup_vx_i16m2(vint16m2_t dest, vint16m2_t src, size_t offset, size_t vl) {
@@ -243,7 +243,7 @@ vint16m2_t test_vslideup_vx_i16m2(vint16m2_t dest, vint16m2_t src, size_t offset
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16.i64(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16.i64(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vslideup_vx_i16m4(vint16m4_t dest, vint16m4_t src, size_t offset, size_t vl) {
@@ -252,7 +252,7 @@ vint16m4_t test_vslideup_vx_i16m4(vint16m4_t dest, vint16m4_t src, size_t offset
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.nxv32i16.i64(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.nxv32i16.i64(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vslideup_vx_i16m8(vint16m8_t dest, vint16m8_t src, size_t offset, size_t vl) {
@@ -261,7 +261,7 @@ vint16m8_t test_vslideup_vx_i16m8(vint16m8_t dest, vint16m8_t src, size_t offset
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32.i64(<vscale x 1 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32.i64(<vscale x 1 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vslideup_vx_i32mf2(vint32mf2_t dest, vint32mf2_t src, size_t offset, size_t vl) {
@@ -270,7 +270,7 @@ vint32mf2_t test_vslideup_vx_i32mf2(vint32mf2_t dest, vint32mf2_t src, size_t of
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vslideup_vx_i32m1(vint32m1_t dest, vint32m1_t src, size_t offset, size_t vl) {
@@ -279,7 +279,7 @@ vint32m1_t test_vslideup_vx_i32m1(vint32m1_t dest, vint32m1_t src, size_t offset
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32.i64(<vscale x 4 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32.i64(<vscale x 4 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vslideup_vx_i32m2(vint32m2_t dest, vint32m2_t src, size_t offset, size_t vl) {
@@ -288,7 +288,7 @@ vint32m2_t test_vslideup_vx_i32m2(vint32m2_t dest, vint32m2_t src, size_t offset
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32.i64(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32.i64(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vslideup_vx_i32m4(vint32m4_t dest, vint32m4_t src, size_t offset, size_t vl) {
@@ -297,7 +297,7 @@ vint32m4_t test_vslideup_vx_i32m4(vint32m4_t dest, vint32m4_t src, size_t offset
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.nxv16i32.i64(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.nxv16i32.i64(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vslideup_vx_i32m8(vint32m8_t dest, vint32m8_t src, size_t offset, size_t vl) {
@@ -306,7 +306,7 @@ vint32m8_t test_vslideup_vx_i32m8(vint32m8_t dest, vint32m8_t src, size_t offset
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vslideup_vx_i64m1(vint64m1_t dest, vint64m1_t src, size_t offset, size_t vl) {
@@ -315,7 +315,7 @@ vint64m1_t test_vslideup_vx_i64m1(vint64m1_t dest, vint64m1_t src, size_t offset
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64.i64(<vscale x 2 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64.i64(<vscale x 2 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vslideup_vx_i64m2(vint64m2_t dest, vint64m2_t src, size_t offset, size_t vl) {
@@ -324,7 +324,7 @@ vint64m2_t test_vslideup_vx_i64m2(vint64m2_t dest, vint64m2_t src, size_t offset
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64.i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64.i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vslideup_vx_i64m4(vint64m4_t dest, vint64m4_t src, size_t offset, size_t vl) {
@@ -333,7 +333,7 @@ vint64m4_t test_vslideup_vx_i64m4(vint64m4_t dest, vint64m4_t src, size_t offset
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.nxv8i64.i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.nxv8i64.i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vslideup_vx_i64m8(vint64m8_t dest, vint64m8_t src, size_t offset, size_t vl) {
@@ -342,7 +342,7 @@ vint64m8_t test_vslideup_vx_i64m8(vint64m8_t dest, vint64m8_t src, size_t offset
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8.i64(<vscale x 1 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8.i64(<vscale x 1 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vslideup_vx_u8mf8(vuint8mf8_t dest, vuint8mf8_t src, size_t offset, size_t vl) {
@@ -351,7 +351,7 @@ vuint8mf8_t test_vslideup_vx_u8mf8(vuint8mf8_t dest, vuint8mf8_t src, size_t off
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8.i64(<vscale x 2 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8.i64(<vscale x 2 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vslideup_vx_u8mf4(vuint8mf4_t dest, vuint8mf4_t src, size_t offset, size_t vl) {
@@ -360,7 +360,7 @@ vuint8mf4_t test_vslideup_vx_u8mf4(vuint8mf4_t dest, vuint8mf4_t src, size_t off
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8.i64(<vscale x 4 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8.i64(<vscale x 4 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vslideup_vx_u8mf2(vuint8mf2_t dest, vuint8mf2_t src, size_t offset, size_t vl) {
@@ -369,7 +369,7 @@ vuint8mf2_t test_vslideup_vx_u8mf2(vuint8mf2_t dest, vuint8mf2_t src, size_t off
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vslideup_vx_u8m1(vuint8m1_t dest, vuint8m1_t src, size_t offset, size_t vl) {
@@ -378,7 +378,7 @@ vuint8m1_t test_vslideup_vx_u8m1(vuint8m1_t dest, vuint8m1_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8.i64(<vscale x 16 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8.i64(<vscale x 16 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vslideup_vx_u8m2(vuint8m2_t dest, vuint8m2_t src, size_t offset, size_t vl) {
@@ -387,7 +387,7 @@ vuint8m2_t test_vslideup_vx_u8m2(vuint8m2_t dest, vuint8m2_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8.i64(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8.i64(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vslideup_vx_u8m4(vuint8m4_t dest, vuint8m4_t src, size_t offset, size_t vl) {
@@ -396,7 +396,7 @@ vuint8m4_t test_vslideup_vx_u8m4(vuint8m4_t dest, vuint8m4_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.nxv64i8.i64(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.nxv64i8.i64(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vslideup_vx_u8m8(vuint8m8_t dest, vuint8m8_t src, size_t offset, size_t vl) {
@@ -405,7 +405,7 @@ vuint8m8_t test_vslideup_vx_u8m8(vuint8m8_t dest, vuint8m8_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16.i64(<vscale x 1 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16.i64(<vscale x 1 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vslideup_vx_u16mf4(vuint16mf4_t dest, vuint16mf4_t src, size_t offset, size_t vl) {
@@ -414,7 +414,7 @@ vuint16mf4_t test_vslideup_vx_u16mf4(vuint16mf4_t dest, vuint16mf4_t src, size_t
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16.i64(<vscale x 2 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16.i64(<vscale x 2 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vslideup_vx_u16mf2(vuint16mf2_t dest, vuint16mf2_t src, size_t offset, size_t vl) {
@@ -423,7 +423,7 @@ vuint16mf2_t test_vslideup_vx_u16mf2(vuint16mf2_t dest, vuint16mf2_t src, size_t
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vslideup_vx_u16m1(vuint16m1_t dest, vuint16m1_t src, size_t offset, size_t vl) {
@@ -432,7 +432,7 @@ vuint16m1_t test_vslideup_vx_u16m1(vuint16m1_t dest, vuint16m1_t src, size_t off
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16.i64(<vscale x 8 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16.i64(<vscale x 8 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vslideup_vx_u16m2(vuint16m2_t dest, vuint16m2_t src, size_t offset, size_t vl) {
@@ -441,7 +441,7 @@ vuint16m2_t test_vslideup_vx_u16m2(vuint16m2_t dest, vuint16m2_t src, size_t off
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16.i64(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16.i64(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vslideup_vx_u16m4(vuint16m4_t dest, vuint16m4_t src, size_t offset, size_t vl) {
@@ -450,7 +450,7 @@ vuint16m4_t test_vslideup_vx_u16m4(vuint16m4_t dest, vuint16m4_t src, size_t off
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.nxv32i16.i64(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.nxv32i16.i64(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vslideup_vx_u16m8(vuint16m8_t dest, vuint16m8_t src, size_t offset, size_t vl) {
@@ -459,7 +459,7 @@ vuint16m8_t test_vslideup_vx_u16m8(vuint16m8_t dest, vuint16m8_t src, size_t off
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32.i64(<vscale x 1 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32.i64(<vscale x 1 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vslideup_vx_u32mf2(vuint32mf2_t dest, vuint32mf2_t src, size_t offset, size_t vl) {
@@ -468,7 +468,7 @@ vuint32mf2_t test_vslideup_vx_u32mf2(vuint32mf2_t dest, vuint32mf2_t src, size_t
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vslideup_vx_u32m1(vuint32m1_t dest, vuint32m1_t src, size_t offset, size_t vl) {
@@ -477,7 +477,7 @@ vuint32m1_t test_vslideup_vx_u32m1(vuint32m1_t dest, vuint32m1_t src, size_t off
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32.i64(<vscale x 4 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32.i64(<vscale x 4 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vslideup_vx_u32m2(vuint32m2_t dest, vuint32m2_t src, size_t offset, size_t vl) {
@@ -486,7 +486,7 @@ vuint32m2_t test_vslideup_vx_u32m2(vuint32m2_t dest, vuint32m2_t src, size_t off
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32.i64(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32.i64(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vslideup_vx_u32m4(vuint32m4_t dest, vuint32m4_t src, size_t offset, size_t vl) {
@@ -495,7 +495,7 @@ vuint32m4_t test_vslideup_vx_u32m4(vuint32m4_t dest, vuint32m4_t src, size_t off
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.nxv16i32.i64(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.nxv16i32.i64(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vslideup_vx_u32m8(vuint32m8_t dest, vuint32m8_t src, size_t offset, size_t vl) {
@@ -504,7 +504,7 @@ vuint32m8_t test_vslideup_vx_u32m8(vuint32m8_t dest, vuint32m8_t src, size_t off
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vslideup_vx_u64m1(vuint64m1_t dest, vuint64m1_t src, size_t offset, size_t vl) {
@@ -513,7 +513,7 @@ vuint64m1_t test_vslideup_vx_u64m1(vuint64m1_t dest, vuint64m1_t src, size_t off
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64.i64(<vscale x 2 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64.i64(<vscale x 2 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vslideup_vx_u64m2(vuint64m2_t dest, vuint64m2_t src, size_t offset, size_t vl) {
@@ -522,7 +522,7 @@ vuint64m2_t test_vslideup_vx_u64m2(vuint64m2_t dest, vuint64m2_t src, size_t off
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64.i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64.i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vslideup_vx_u64m4(vuint64m4_t dest, vuint64m4_t src, size_t offset, size_t vl) {
@@ -531,7 +531,7 @@ vuint64m4_t test_vslideup_vx_u64m4(vuint64m4_t dest, vuint64m4_t src, size_t off
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.nxv8i64.i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.nxv8i64.i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vslideup_vx_u64m8(vuint64m8_t dest, vuint64m8_t src, size_t offset, size_t vl) {
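For reference, a minimal usage sketch (not part of the commit) of the non-policy intrinsic exercised by the tests above. The intrinsic name and signature are assumed from the autogenerated test prototypes in this file (pre-`__riscv_`-prefix naming), and building it requires a RISC-V target with the V extension. After this change, the generated llvm.riscv.vslideup call carries a tail-agnostic policy operand (i64 1) rather than tail-undisturbed (i64 0):

// Sketch only: mirrors the autogenerated test prototypes in this file.
#include <stddef.h>
#include <riscv_vector.h>

vint32m1_t shift_up_i32m1(vint32m1_t dest, vint32m1_t src, size_t offset,
                          size_t vl) {
  // Non-policy form: with this patch its tail policy defaults to tail-agnostic.
  return vslideup_vx_i32m1(dest, src, offset, vl);
}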


        

