[clang] 8ac6284 - [Clang][RISCV][NFC] Re-run update_cc_test_checks.py on riscv-rvv-vls-*.c

Jim Lin via cfe-commits cfe-commits at lists.llvm.org
Mon Aug 28 20:37:03 PDT 2023


Author: Jim Lin
Date: 2023-08-29T11:35:14+08:00
New Revision: 8ac62845ff4e7712cc224cf0350c7d14efc22365

URL: https://github.com/llvm/llvm-project/commit/8ac62845ff4e7712cc224cf0350c7d14efc22365
DIFF: https://github.com/llvm/llvm-project/commit/8ac62845ff4e7712cc224cf0350c7d14efc22365.diff

LOG: [Clang][RISCV][NFC] Re-run update_cc_test_checks.py on riscv-rvv-vls-*.c

IR value names were changed in https://reviews.llvm.org/D155220, so regenerating the checks renames the auto-generated FileCheck variable [[CASTSCALABLESVE]] to [[CAST_SCALABLE]] throughout these tests.
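
For reference, a regeneration along these lines (the build directory path is an assumption, not part of this commit) would look something like:

    python clang/utils/update_cc_test_checks.py --llvm-bin=build/bin \
        clang/test/CodeGen/riscv-rvv-vls-arith-ops.c \
        clang/test/CodeGen/riscv-rvv-vls-bitwise-ops.c \
        clang/test/CodeGen/riscv-rvv-vls-compare-ops.c \
        clang/test/CodeGen/riscv-rvv-vls-shift-ops.c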

Added: 
    

Modified: 
    clang/test/CodeGen/riscv-rvv-vls-arith-ops.c
    clang/test/CodeGen/riscv-rvv-vls-bitwise-ops.c
    clang/test/CodeGen/riscv-rvv-vls-compare-ops.c
    clang/test/CodeGen/riscv-rvv-vls-shift-ops.c

Removed: 
    


################################################################################
diff --git a/clang/test/CodeGen/riscv-rvv-vls-arith-ops.c b/clang/test/CodeGen/riscv-rvv-vls-arith-ops.c
index a090e8b9bb9585..76fcf38a0d98f0 100644
--- a/clang/test/CodeGen/riscv-rvv-vls-arith-ops.c
+++ b/clang/test/CodeGen/riscv-rvv-vls-arith-ops.c
@@ -39,8 +39,8 @@ typedef vfloat64m1_t fixed_float64m1_t __attribute__((riscv_rvv_vector_bits(__ri
 // CHECK-NEXT:    [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[ADD:%.*]] = add <32 x i8> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[ADD]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[ADD]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_int8m1_t add_i8(fixed_int8m1_t a, fixed_int8m1_t b) {
   return a + b;
@@ -51,8 +51,8 @@ fixed_int8m1_t add_i8(fixed_int8m1_t a, fixed_int8m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[ADD:%.*]] = add <16 x i16> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[ADD]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[ADD]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_int16m1_t add_i16(fixed_int16m1_t a, fixed_int16m1_t b) {
   return a + b;
@@ -63,8 +63,8 @@ fixed_int16m1_t add_i16(fixed_int16m1_t a, fixed_int16m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[ADD:%.*]] = add <8 x i32> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[ADD]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[ADD]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_int32m1_t add_i32(fixed_int32m1_t a, fixed_int32m1_t b) {
   return a + b;
@@ -75,8 +75,8 @@ fixed_int32m1_t add_i32(fixed_int32m1_t a, fixed_int32m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[ADD:%.*]] = add <4 x i64> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[ADD]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[ADD]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_int64m1_t add_i64(fixed_int64m1_t a, fixed_int64m1_t b) {
   return a + b;
@@ -87,8 +87,8 @@ fixed_int64m1_t add_i64(fixed_int64m1_t a, fixed_int64m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[ADD:%.*]] = add <32 x i8> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[ADD]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[ADD]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_uint8m1_t add_u8(fixed_uint8m1_t a, fixed_uint8m1_t b) {
   return a + b;
@@ -99,8 +99,8 @@ fixed_uint8m1_t add_u8(fixed_uint8m1_t a, fixed_uint8m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[ADD:%.*]] = add <16 x i16> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[ADD]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[ADD]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_uint16m1_t add_u16(fixed_uint16m1_t a, fixed_uint16m1_t b) {
   return a + b;
@@ -111,8 +111,8 @@ fixed_uint16m1_t add_u16(fixed_uint16m1_t a, fixed_uint16m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[ADD:%.*]] = add <8 x i32> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[ADD]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[ADD]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_uint32m1_t add_u32(fixed_uint32m1_t a, fixed_uint32m1_t b) {
   return a + b;
@@ -123,8 +123,8 @@ fixed_uint32m1_t add_u32(fixed_uint32m1_t a, fixed_uint32m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[ADD:%.*]] = add <4 x i64> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[ADD]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[ADD]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_uint64m1_t add_u64(fixed_uint64m1_t a, fixed_uint64m1_t b) {
   return a + b;
@@ -135,8 +135,8 @@ fixed_uint64m1_t add_u64(fixed_uint64m1_t a, fixed_uint64m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv2f32(<vscale x 2 x float> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv2f32(<vscale x 2 x float> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[ADD:%.*]] = fadd <8 x float> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x float> @llvm.vector.insert.nxv2f32.v8f32(<vscale x 2 x float> undef, <8 x float> [[ADD]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x float> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x float> @llvm.vector.insert.nxv2f32.v8f32(<vscale x 2 x float> undef, <8 x float> [[ADD]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x float> [[CAST_SCALABLE]]
 //
 fixed_float32m1_t add_f32(fixed_float32m1_t a, fixed_float32m1_t b) {
   return a + b;
@@ -147,8 +147,8 @@ fixed_float32m1_t add_f32(fixed_float32m1_t a, fixed_float32m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <4 x double> @llvm.vector.extract.v4f64.nxv1f64(<vscale x 1 x double> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <4 x double> @llvm.vector.extract.v4f64.nxv1f64(<vscale x 1 x double> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[ADD:%.*]] = fadd <4 x double> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x double> @llvm.vector.insert.nxv1f64.v4f64(<vscale x 1 x double> undef, <4 x double> [[ADD]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x double> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x double> @llvm.vector.insert.nxv1f64.v4f64(<vscale x 1 x double> undef, <4 x double> [[ADD]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x double> [[CAST_SCALABLE]]
 //
 fixed_float64m1_t add_f64(fixed_float64m1_t a, fixed_float64m1_t b) {
   return a + b;
@@ -159,8 +159,8 @@ fixed_float64m1_t add_f64(fixed_float64m1_t a, fixed_float64m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[ADD:%.*]] = add <32 x i8> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[ADD]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[ADD]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_int8m1_t add_inplace_i8(fixed_int8m1_t a, fixed_int8m1_t b) {
   return a += b;
@@ -171,8 +171,8 @@ fixed_int8m1_t add_inplace_i8(fixed_int8m1_t a, fixed_int8m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[ADD:%.*]] = add <16 x i16> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[ADD]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[ADD]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_int16m1_t add_inplace_i16(fixed_int16m1_t a, fixed_int16m1_t b) {
   return a += b;
@@ -183,8 +183,8 @@ fixed_int16m1_t add_inplace_i16(fixed_int16m1_t a, fixed_int16m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[ADD:%.*]] = add <8 x i32> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[ADD]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[ADD]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_int32m1_t add_inplace_i32(fixed_int32m1_t a, fixed_int32m1_t b) {
   return a += b;
@@ -195,8 +195,8 @@ fixed_int32m1_t add_inplace_i32(fixed_int32m1_t a, fixed_int32m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[ADD:%.*]] = add <4 x i64> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[ADD]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[ADD]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_int64m1_t add_inplace_i64(fixed_int64m1_t a, fixed_int64m1_t b) {
   return a += b;
@@ -207,8 +207,8 @@ fixed_int64m1_t add_inplace_i64(fixed_int64m1_t a, fixed_int64m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[ADD:%.*]] = add <32 x i8> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[ADD]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[ADD]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_uint8m1_t add_inplace_u8(fixed_uint8m1_t a, fixed_uint8m1_t b) {
   return a += b;
@@ -219,8 +219,8 @@ fixed_uint8m1_t add_inplace_u8(fixed_uint8m1_t a, fixed_uint8m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[ADD:%.*]] = add <16 x i16> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[ADD]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[ADD]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_uint16m1_t add_inplace_u16(fixed_uint16m1_t a, fixed_uint16m1_t b) {
   return a += b;
@@ -231,8 +231,8 @@ fixed_uint16m1_t add_inplace_u16(fixed_uint16m1_t a, fixed_uint16m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[ADD:%.*]] = add <8 x i32> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[ADD]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[ADD]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_uint32m1_t add_inplace_u32(fixed_uint32m1_t a, fixed_uint32m1_t b) {
   return a += b;
@@ -243,8 +243,8 @@ fixed_uint32m1_t add_inplace_u32(fixed_uint32m1_t a, fixed_uint32m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[ADD:%.*]] = add <4 x i64> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[ADD]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[ADD]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_uint64m1_t add_inplace_u64(fixed_uint64m1_t a, fixed_uint64m1_t b) {
   return a += b;
@@ -255,8 +255,8 @@ fixed_uint64m1_t add_inplace_u64(fixed_uint64m1_t a, fixed_uint64m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv2f32(<vscale x 2 x float> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv2f32(<vscale x 2 x float> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[ADD:%.*]] = fadd <8 x float> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x float> @llvm.vector.insert.nxv2f32.v8f32(<vscale x 2 x float> undef, <8 x float> [[ADD]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x float> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x float> @llvm.vector.insert.nxv2f32.v8f32(<vscale x 2 x float> undef, <8 x float> [[ADD]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x float> [[CAST_SCALABLE]]
 //
 fixed_float32m1_t add_inplace_f32(fixed_float32m1_t a, fixed_float32m1_t b) {
   return a += b;
@@ -267,8 +267,8 @@ fixed_float32m1_t add_inplace_f32(fixed_float32m1_t a, fixed_float32m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <4 x double> @llvm.vector.extract.v4f64.nxv1f64(<vscale x 1 x double> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <4 x double> @llvm.vector.extract.v4f64.nxv1f64(<vscale x 1 x double> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[ADD:%.*]] = fadd <4 x double> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x double> @llvm.vector.insert.nxv1f64.v4f64(<vscale x 1 x double> undef, <4 x double> [[ADD]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x double> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x double> @llvm.vector.insert.nxv1f64.v4f64(<vscale x 1 x double> undef, <4 x double> [[ADD]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x double> [[CAST_SCALABLE]]
 //
 fixed_float64m1_t add_inplace_f64(fixed_float64m1_t a, fixed_float64m1_t b) {
   return a += b;
@@ -280,8 +280,8 @@ fixed_float64m1_t add_inplace_f64(fixed_float64m1_t a, fixed_float64m1_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i8> poison, i8 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i8> [[SPLAT_SPLATINSERT]], <32 x i8> poison, <32 x i32> zeroinitializer
 // CHECK-NEXT:    [[ADD:%.*]] = add <32 x i8> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[ADD]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[ADD]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_int8m1_t add_scalar_i8(fixed_int8m1_t a, int8_t b) {
   return a + b;
@@ -293,8 +293,8 @@ fixed_int8m1_t add_scalar_i8(fixed_int8m1_t a, int8_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i16> poison, i16 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i16> [[SPLAT_SPLATINSERT]], <16 x i16> poison, <16 x i32> zeroinitializer
 // CHECK-NEXT:    [[ADD:%.*]] = add <16 x i16> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[ADD]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[ADD]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_int16m1_t add_scalar_i16(fixed_int16m1_t a, int16_t b) {
   return a + b;
@@ -306,8 +306,8 @@ fixed_int16m1_t add_scalar_i16(fixed_int16m1_t a, int16_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i32> poison, i32 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT]], <8 x i32> poison, <8 x i32> zeroinitializer
 // CHECK-NEXT:    [[ADD:%.*]] = add <8 x i32> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[ADD]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[ADD]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_int32m1_t add_scalar_i32(fixed_int32m1_t a, int32_t b) {
   return a + b;
@@ -319,8 +319,8 @@ fixed_int32m1_t add_scalar_i32(fixed_int32m1_t a, int32_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <4 x i64> [[SPLAT_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
 // CHECK-NEXT:    [[ADD:%.*]] = add <4 x i64> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[ADD]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[ADD]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_int64m1_t add_scalar_i64(fixed_int64m1_t a, int64_t b) {
   return a + b;
@@ -332,8 +332,8 @@ fixed_int64m1_t add_scalar_i64(fixed_int64m1_t a, int64_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i8> poison, i8 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i8> [[SPLAT_SPLATINSERT]], <32 x i8> poison, <32 x i32> zeroinitializer
 // CHECK-NEXT:    [[ADD:%.*]] = add <32 x i8> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[ADD]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[ADD]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_uint8m1_t add_scalar_u8(fixed_uint8m1_t a, uint8_t b) {
   return a + b;
@@ -345,8 +345,8 @@ fixed_uint8m1_t add_scalar_u8(fixed_uint8m1_t a, uint8_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i16> poison, i16 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i16> [[SPLAT_SPLATINSERT]], <16 x i16> poison, <16 x i32> zeroinitializer
 // CHECK-NEXT:    [[ADD:%.*]] = add <16 x i16> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[ADD]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[ADD]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_uint16m1_t add_scalar_u16(fixed_uint16m1_t a, uint16_t b) {
   return a + b;
@@ -358,8 +358,8 @@ fixed_uint16m1_t add_scalar_u16(fixed_uint16m1_t a, uint16_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i32> poison, i32 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT]], <8 x i32> poison, <8 x i32> zeroinitializer
 // CHECK-NEXT:    [[ADD:%.*]] = add <8 x i32> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[ADD]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[ADD]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_uint32m1_t add_scalar_u32(fixed_uint32m1_t a, uint32_t b) {
   return a + b;
@@ -371,8 +371,8 @@ fixed_uint32m1_t add_scalar_u32(fixed_uint32m1_t a, uint32_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <4 x i64> [[SPLAT_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
 // CHECK-NEXT:    [[ADD:%.*]] = add <4 x i64> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[ADD]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[ADD]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_uint64m1_t add_scalar_u64(fixed_uint64m1_t a, uint64_t b) {
   return a + b;
@@ -384,8 +384,8 @@ fixed_uint64m1_t add_scalar_u64(fixed_uint64m1_t a, uint64_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x float> poison, float [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <8 x float> [[SPLAT_SPLATINSERT]], <8 x float> poison, <8 x i32> zeroinitializer
 // CHECK-NEXT:    [[ADD:%.*]] = fadd <8 x float> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x float> @llvm.vector.insert.nxv2f32.v8f32(<vscale x 2 x float> undef, <8 x float> [[ADD]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x float> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x float> @llvm.vector.insert.nxv2f32.v8f32(<vscale x 2 x float> undef, <8 x float> [[ADD]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x float> [[CAST_SCALABLE]]
 //
 fixed_float32m1_t add_scalar_f32(fixed_float32m1_t a, float b) {
   return a + b;
@@ -397,8 +397,8 @@ fixed_float32m1_t add_scalar_f32(fixed_float32m1_t a, float b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <4 x double> poison, double [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <4 x double> [[SPLAT_SPLATINSERT]], <4 x double> poison, <4 x i32> zeroinitializer
 // CHECK-NEXT:    [[ADD:%.*]] = fadd <4 x double> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x double> @llvm.vector.insert.nxv1f64.v4f64(<vscale x 1 x double> undef, <4 x double> [[ADD]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x double> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x double> @llvm.vector.insert.nxv1f64.v4f64(<vscale x 1 x double> undef, <4 x double> [[ADD]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x double> [[CAST_SCALABLE]]
 //
 fixed_float64m1_t add_scalar_f64(fixed_float64m1_t a, double b) {
   return a + b;
@@ -411,8 +411,8 @@ fixed_float64m1_t add_scalar_f64(fixed_float64m1_t a, double b) {
 // CHECK-NEXT:    [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[SUB:%.*]] = sub <32 x i8> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SUB]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SUB]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_int8m1_t sub_i8(fixed_int8m1_t a, fixed_int8m1_t b) {
   return a - b;
@@ -423,8 +423,8 @@ fixed_int8m1_t sub_i8(fixed_int8m1_t a, fixed_int8m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[SUB:%.*]] = sub <16 x i16> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SUB]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SUB]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_int16m1_t sub_i16(fixed_int16m1_t a, fixed_int16m1_t b) {
   return a - b;
@@ -435,8 +435,8 @@ fixed_int16m1_t sub_i16(fixed_int16m1_t a, fixed_int16m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[SUB:%.*]] = sub <8 x i32> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SUB]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SUB]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_int32m1_t sub_i32(fixed_int32m1_t a, fixed_int32m1_t b) {
   return a - b;
@@ -447,8 +447,8 @@ fixed_int32m1_t sub_i32(fixed_int32m1_t a, fixed_int32m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[SUB:%.*]] = sub <4 x i64> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SUB]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SUB]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_int64m1_t sub_i64(fixed_int64m1_t a, fixed_int64m1_t b) {
   return a - b;
@@ -459,8 +459,8 @@ fixed_int64m1_t sub_i64(fixed_int64m1_t a, fixed_int64m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[SUB:%.*]] = sub <32 x i8> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SUB]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SUB]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_uint8m1_t sub_u8(fixed_uint8m1_t a, fixed_uint8m1_t b) {
   return a - b;
@@ -471,8 +471,8 @@ fixed_uint8m1_t sub_u8(fixed_uint8m1_t a, fixed_uint8m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[SUB:%.*]] = sub <16 x i16> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SUB]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SUB]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_uint16m1_t sub_u16(fixed_uint16m1_t a, fixed_uint16m1_t b) {
   return a - b;
@@ -483,8 +483,8 @@ fixed_uint16m1_t sub_u16(fixed_uint16m1_t a, fixed_uint16m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[SUB:%.*]] = sub <8 x i32> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SUB]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SUB]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_uint32m1_t sub_u32(fixed_uint32m1_t a, fixed_uint32m1_t b) {
   return a - b;
@@ -495,8 +495,8 @@ fixed_uint32m1_t sub_u32(fixed_uint32m1_t a, fixed_uint32m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[SUB:%.*]] = sub <4 x i64> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SUB]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SUB]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_uint64m1_t sub_u64(fixed_uint64m1_t a, fixed_uint64m1_t b) {
   return a - b;
@@ -507,8 +507,8 @@ fixed_uint64m1_t sub_u64(fixed_uint64m1_t a, fixed_uint64m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv2f32(<vscale x 2 x float> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv2f32(<vscale x 2 x float> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[SUB:%.*]] = fsub <8 x float> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x float> @llvm.vector.insert.nxv2f32.v8f32(<vscale x 2 x float> undef, <8 x float> [[SUB]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x float> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x float> @llvm.vector.insert.nxv2f32.v8f32(<vscale x 2 x float> undef, <8 x float> [[SUB]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x float> [[CAST_SCALABLE]]
 //
 fixed_float32m1_t sub_f32(fixed_float32m1_t a, fixed_float32m1_t b) {
   return a - b;
@@ -519,8 +519,8 @@ fixed_float32m1_t sub_f32(fixed_float32m1_t a, fixed_float32m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <4 x double> @llvm.vector.extract.v4f64.nxv1f64(<vscale x 1 x double> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <4 x double> @llvm.vector.extract.v4f64.nxv1f64(<vscale x 1 x double> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[SUB:%.*]] = fsub <4 x double> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x double> @llvm.vector.insert.nxv1f64.v4f64(<vscale x 1 x double> undef, <4 x double> [[SUB]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x double> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x double> @llvm.vector.insert.nxv1f64.v4f64(<vscale x 1 x double> undef, <4 x double> [[SUB]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x double> [[CAST_SCALABLE]]
 //
 fixed_float64m1_t sub_f64(fixed_float64m1_t a, fixed_float64m1_t b) {
   return a - b;
@@ -531,8 +531,8 @@ fixed_float64m1_t sub_f64(fixed_float64m1_t a, fixed_float64m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[SUB:%.*]] = sub <32 x i8> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SUB]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SUB]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_int8m1_t sub_inplace_i8(fixed_int8m1_t a, fixed_int8m1_t b) {
   return a - b;
@@ -543,8 +543,8 @@ fixed_int8m1_t sub_inplace_i8(fixed_int8m1_t a, fixed_int8m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[SUB:%.*]] = sub <16 x i16> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SUB]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SUB]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_int16m1_t sub_inplace_i16(fixed_int16m1_t a, fixed_int16m1_t b) {
   return a - b;
@@ -555,8 +555,8 @@ fixed_int16m1_t sub_inplace_i16(fixed_int16m1_t a, fixed_int16m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[SUB:%.*]] = sub <8 x i32> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SUB]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SUB]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_int32m1_t sub_inplace_i32(fixed_int32m1_t a, fixed_int32m1_t b) {
   return a - b;
@@ -567,8 +567,8 @@ fixed_int32m1_t sub_inplace_i32(fixed_int32m1_t a, fixed_int32m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[SUB:%.*]] = sub <4 x i64> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SUB]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SUB]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_int64m1_t sub_inplace_i64(fixed_int64m1_t a, fixed_int64m1_t b) {
   return a - b;
@@ -579,8 +579,8 @@ fixed_int64m1_t sub_inplace_i64(fixed_int64m1_t a, fixed_int64m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[SUB:%.*]] = sub <32 x i8> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SUB]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SUB]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_uint8m1_t sub_inplace_u8(fixed_uint8m1_t a, fixed_uint8m1_t b) {
   return a - b;
@@ -591,8 +591,8 @@ fixed_uint8m1_t sub_inplace_u8(fixed_uint8m1_t a, fixed_uint8m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[SUB:%.*]] = sub <16 x i16> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SUB]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SUB]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_uint16m1_t sub_inplace_u16(fixed_uint16m1_t a, fixed_uint16m1_t b) {
   return a - b;
@@ -603,8 +603,8 @@ fixed_uint16m1_t sub_inplace_u16(fixed_uint16m1_t a, fixed_uint16m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[SUB:%.*]] = sub <8 x i32> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SUB]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SUB]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_uint32m1_t sub_inplace_u32(fixed_uint32m1_t a, fixed_uint32m1_t b) {
   return a - b;
@@ -615,8 +615,8 @@ fixed_uint32m1_t sub_inplace_u32(fixed_uint32m1_t a, fixed_uint32m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[SUB:%.*]] = sub <4 x i64> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SUB]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SUB]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_uint64m1_t sub_inplace_u64(fixed_uint64m1_t a, fixed_uint64m1_t b) {
   return a - b;
@@ -627,8 +627,8 @@ fixed_uint64m1_t sub_inplace_u64(fixed_uint64m1_t a, fixed_uint64m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv2f32(<vscale x 2 x float> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv2f32(<vscale x 2 x float> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[SUB:%.*]] = fsub <8 x float> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x float> @llvm.vector.insert.nxv2f32.v8f32(<vscale x 2 x float> undef, <8 x float> [[SUB]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x float> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x float> @llvm.vector.insert.nxv2f32.v8f32(<vscale x 2 x float> undef, <8 x float> [[SUB]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x float> [[CAST_SCALABLE]]
 //
 fixed_float32m1_t sub_inplace_f32(fixed_float32m1_t a, fixed_float32m1_t b) {
   return a - b;
@@ -639,8 +639,8 @@ fixed_float32m1_t sub_inplace_f32(fixed_float32m1_t a, fixed_float32m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <4 x double> @llvm.vector.extract.v4f64.nxv1f64(<vscale x 1 x double> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <4 x double> @llvm.vector.extract.v4f64.nxv1f64(<vscale x 1 x double> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[SUB:%.*]] = fsub <4 x double> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x double> @llvm.vector.insert.nxv1f64.v4f64(<vscale x 1 x double> undef, <4 x double> [[SUB]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x double> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x double> @llvm.vector.insert.nxv1f64.v4f64(<vscale x 1 x double> undef, <4 x double> [[SUB]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x double> [[CAST_SCALABLE]]
 //
 fixed_float64m1_t sub_inplace_f64(fixed_float64m1_t a, fixed_float64m1_t b) {
   return a - b;
@@ -652,8 +652,8 @@ fixed_float64m1_t sub_inplace_f64(fixed_float64m1_t a, fixed_float64m1_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i8> poison, i8 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i8> [[SPLAT_SPLATINSERT]], <32 x i8> poison, <32 x i32> zeroinitializer
 // CHECK-NEXT:    [[SUB:%.*]] = sub <32 x i8> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SUB]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SUB]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_int8m1_t sub_scalar_i8(fixed_int8m1_t a, int8_t b) {
   return a - b;
@@ -665,8 +665,8 @@ fixed_int8m1_t sub_scalar_i8(fixed_int8m1_t a, int8_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i16> poison, i16 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i16> [[SPLAT_SPLATINSERT]], <16 x i16> poison, <16 x i32> zeroinitializer
 // CHECK-NEXT:    [[SUB:%.*]] = sub <16 x i16> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SUB]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SUB]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_int16m1_t sub_scalar_i16(fixed_int16m1_t a, int16_t b) {
   return a - b;
@@ -678,8 +678,8 @@ fixed_int16m1_t sub_scalar_i16(fixed_int16m1_t a, int16_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i32> poison, i32 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT]], <8 x i32> poison, <8 x i32> zeroinitializer
 // CHECK-NEXT:    [[SUB:%.*]] = sub <8 x i32> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SUB]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SUB]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_int32m1_t sub_scalar_i32(fixed_int32m1_t a, int32_t b) {
   return a - b;
@@ -691,8 +691,8 @@ fixed_int32m1_t sub_scalar_i32(fixed_int32m1_t a, int32_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <4 x i64> [[SPLAT_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
 // CHECK-NEXT:    [[SUB:%.*]] = sub <4 x i64> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SUB]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SUB]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_int64m1_t sub_scalar_i64(fixed_int64m1_t a, int64_t b) {
   return a - b;
@@ -704,8 +704,8 @@ fixed_int64m1_t sub_scalar_i64(fixed_int64m1_t a, int64_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i8> poison, i8 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i8> [[SPLAT_SPLATINSERT]], <32 x i8> poison, <32 x i32> zeroinitializer
 // CHECK-NEXT:    [[SUB:%.*]] = sub <32 x i8> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SUB]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SUB]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_uint8m1_t sub_scalar_u8(fixed_uint8m1_t a, uint8_t b) {
   return a - b;
@@ -717,8 +717,8 @@ fixed_uint8m1_t sub_scalar_u8(fixed_uint8m1_t a, uint8_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i16> poison, i16 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i16> [[SPLAT_SPLATINSERT]], <16 x i16> poison, <16 x i32> zeroinitializer
 // CHECK-NEXT:    [[SUB:%.*]] = sub <16 x i16> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SUB]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SUB]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_uint16m1_t sub_scalar_u16(fixed_uint16m1_t a, uint16_t b) {
   return a - b;
@@ -730,8 +730,8 @@ fixed_uint16m1_t sub_scalar_u16(fixed_uint16m1_t a, uint16_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i32> poison, i32 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT]], <8 x i32> poison, <8 x i32> zeroinitializer
 // CHECK-NEXT:    [[SUB:%.*]] = sub <8 x i32> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SUB]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SUB]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_uint32m1_t sub_scalar_u32(fixed_uint32m1_t a, uint32_t b) {
   return a - b;
@@ -743,8 +743,8 @@ fixed_uint32m1_t sub_scalar_u32(fixed_uint32m1_t a, uint32_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <4 x i64> [[SPLAT_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
 // CHECK-NEXT:    [[SUB:%.*]] = sub <4 x i64> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SUB]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SUB]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_uint64m1_t sub_scalar_u64(fixed_uint64m1_t a, uint64_t b) {
   return a - b;
@@ -756,8 +756,8 @@ fixed_uint64m1_t sub_scalar_u64(fixed_uint64m1_t a, uint64_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x float> poison, float [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <8 x float> [[SPLAT_SPLATINSERT]], <8 x float> poison, <8 x i32> zeroinitializer
 // CHECK-NEXT:    [[SUB:%.*]] = fsub <8 x float> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x float> @llvm.vector.insert.nxv2f32.v8f32(<vscale x 2 x float> undef, <8 x float> [[SUB]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x float> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x float> @llvm.vector.insert.nxv2f32.v8f32(<vscale x 2 x float> undef, <8 x float> [[SUB]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x float> [[CAST_SCALABLE]]
 //
 fixed_float32m1_t sub_scalar_f32(fixed_float32m1_t a, float b) {
   return a - b;
@@ -769,8 +769,8 @@ fixed_float32m1_t sub_scalar_f32(fixed_float32m1_t a, float b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <4 x double> poison, double [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <4 x double> [[SPLAT_SPLATINSERT]], <4 x double> poison, <4 x i32> zeroinitializer
 // CHECK-NEXT:    [[SUB:%.*]] = fsub <4 x double> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x double> @llvm.vector.insert.nxv1f64.v4f64(<vscale x 1 x double> undef, <4 x double> [[SUB]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x double> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x double> @llvm.vector.insert.nxv1f64.v4f64(<vscale x 1 x double> undef, <4 x double> [[SUB]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x double> [[CAST_SCALABLE]]
 //
 fixed_float64m1_t sub_scalar_f64(fixed_float64m1_t a, double b) {
   return a - b;
@@ -783,8 +783,8 @@ fixed_float64m1_t sub_scalar_f64(fixed_float64m1_t a, double b) {
 // CHECK-NEXT:    [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[MUL:%.*]] = mul <32 x i8> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[MUL]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[MUL]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_int8m1_t mul_i8(fixed_int8m1_t a, fixed_int8m1_t b) {
   return a * b;
@@ -795,8 +795,8 @@ fixed_int8m1_t mul_i8(fixed_int8m1_t a, fixed_int8m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[MUL:%.*]] = mul <16 x i16> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[MUL]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[MUL]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_int16m1_t mul_i16(fixed_int16m1_t a, fixed_int16m1_t b) {
   return a * b;
@@ -807,8 +807,8 @@ fixed_int16m1_t mul_i16(fixed_int16m1_t a, fixed_int16m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[MUL:%.*]] = mul <8 x i32> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[MUL]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[MUL]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_int32m1_t mul_i32(fixed_int32m1_t a, fixed_int32m1_t b) {
   return a * b;
@@ -819,8 +819,8 @@ fixed_int32m1_t mul_i32(fixed_int32m1_t a, fixed_int32m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[MUL:%.*]] = mul <4 x i64> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[MUL]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[MUL]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_int64m1_t mul_i64(fixed_int64m1_t a, fixed_int64m1_t b) {
   return a * b;
@@ -831,8 +831,8 @@ fixed_int64m1_t mul_i64(fixed_int64m1_t a, fixed_int64m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[MUL:%.*]] = mul <32 x i8> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[MUL]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[MUL]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_uint8m1_t mul_u8(fixed_uint8m1_t a, fixed_uint8m1_t b) {
   return a * b;
@@ -843,8 +843,8 @@ fixed_uint8m1_t mul_u8(fixed_uint8m1_t a, fixed_uint8m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[MUL:%.*]] = mul <16 x i16> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[MUL]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[MUL]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_uint16m1_t mul_u16(fixed_uint16m1_t a, fixed_uint16m1_t b) {
   return a * b;
@@ -855,8 +855,8 @@ fixed_uint16m1_t mul_u16(fixed_uint16m1_t a, fixed_uint16m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[MUL:%.*]] = mul <8 x i32> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[MUL]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[MUL]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_uint32m1_t mul_u32(fixed_uint32m1_t a, fixed_uint32m1_t b) {
   return a * b;
@@ -867,8 +867,8 @@ fixed_uint32m1_t mul_u32(fixed_uint32m1_t a, fixed_uint32m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[MUL:%.*]] = mul <4 x i64> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[MUL]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[MUL]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_uint64m1_t mul_u64(fixed_uint64m1_t a, fixed_uint64m1_t b) {
   return a * b;
@@ -879,8 +879,8 @@ fixed_uint64m1_t mul_u64(fixed_uint64m1_t a, fixed_uint64m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv2f32(<vscale x 2 x float> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv2f32(<vscale x 2 x float> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[MUL:%.*]] = fmul <8 x float> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x float> @llvm.vector.insert.nxv2f32.v8f32(<vscale x 2 x float> undef, <8 x float> [[MUL]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x float> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x float> @llvm.vector.insert.nxv2f32.v8f32(<vscale x 2 x float> undef, <8 x float> [[MUL]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x float> [[CAST_SCALABLE]]
 //
 fixed_float32m1_t mul_f32(fixed_float32m1_t a, fixed_float32m1_t b) {
   return a * b;
@@ -891,8 +891,8 @@ fixed_float32m1_t mul_f32(fixed_float32m1_t a, fixed_float32m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <4 x double> @llvm.vector.extract.v4f64.nxv1f64(<vscale x 1 x double> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <4 x double> @llvm.vector.extract.v4f64.nxv1f64(<vscale x 1 x double> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[MUL:%.*]] = fmul <4 x double> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x double> @llvm.vector.insert.nxv1f64.v4f64(<vscale x 1 x double> undef, <4 x double> [[MUL]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x double> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x double> @llvm.vector.insert.nxv1f64.v4f64(<vscale x 1 x double> undef, <4 x double> [[MUL]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x double> [[CAST_SCALABLE]]
 //
 fixed_float64m1_t mul_f64(fixed_float64m1_t a, fixed_float64m1_t b) {
   return a * b;
@@ -903,8 +903,8 @@ fixed_float64m1_t mul_f64(fixed_float64m1_t a, fixed_float64m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[MUL:%.*]] = mul <32 x i8> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[MUL]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[MUL]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_int8m1_t mul_inplace_i8(fixed_int8m1_t a, fixed_int8m1_t b) {
   return a * b;
@@ -915,8 +915,8 @@ fixed_int8m1_t mul_inplace_i8(fixed_int8m1_t a, fixed_int8m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[MUL:%.*]] = mul <16 x i16> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[MUL]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[MUL]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_int16m1_t mul_inplace_i16(fixed_int16m1_t a, fixed_int16m1_t b) {
   return a * b;
@@ -927,8 +927,8 @@ fixed_int16m1_t mul_inplace_i16(fixed_int16m1_t a, fixed_int16m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[MUL:%.*]] = mul <8 x i32> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[MUL]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[MUL]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_int32m1_t mul_inplace_i32(fixed_int32m1_t a, fixed_int32m1_t b) {
   return a * b;
@@ -939,8 +939,8 @@ fixed_int32m1_t mul_inplace_i32(fixed_int32m1_t a, fixed_int32m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[MUL:%.*]] = mul <4 x i64> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[MUL]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[MUL]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_int64m1_t mul_inplace_i64(fixed_int64m1_t a, fixed_int64m1_t b) {
   return a * b;
@@ -951,8 +951,8 @@ fixed_int64m1_t mul_inplace_i64(fixed_int64m1_t a, fixed_int64m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[MUL:%.*]] = mul <32 x i8> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[MUL]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[MUL]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_uint8m1_t mul_inplace_u8(fixed_uint8m1_t a, fixed_uint8m1_t b) {
   return a * b;
@@ -963,8 +963,8 @@ fixed_uint8m1_t mul_inplace_u8(fixed_uint8m1_t a, fixed_uint8m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[MUL:%.*]] = mul <16 x i16> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[MUL]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[MUL]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_uint16m1_t mul_inplace_u16(fixed_uint16m1_t a, fixed_uint16m1_t b) {
   return a * b;
@@ -975,8 +975,8 @@ fixed_uint16m1_t mul_inplace_u16(fixed_uint16m1_t a, fixed_uint16m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[MUL:%.*]] = mul <8 x i32> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[MUL]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[MUL]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_uint32m1_t mul_inplace_u32(fixed_uint32m1_t a, fixed_uint32m1_t b) {
   return a * b;
@@ -987,8 +987,8 @@ fixed_uint32m1_t mul_inplace_u32(fixed_uint32m1_t a, fixed_uint32m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[MUL:%.*]] = mul <4 x i64> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[MUL]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[MUL]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_uint64m1_t mul_inplace_u64(fixed_uint64m1_t a, fixed_uint64m1_t b) {
   return a * b;
@@ -999,8 +999,8 @@ fixed_uint64m1_t mul_inplace_u64(fixed_uint64m1_t a, fixed_uint64m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv2f32(<vscale x 2 x float> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv2f32(<vscale x 2 x float> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[MUL:%.*]] = fmul <8 x float> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x float> @llvm.vector.insert.nxv2f32.v8f32(<vscale x 2 x float> undef, <8 x float> [[MUL]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x float> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x float> @llvm.vector.insert.nxv2f32.v8f32(<vscale x 2 x float> undef, <8 x float> [[MUL]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x float> [[CAST_SCALABLE]]
 //
 fixed_float32m1_t mul_inplace_f32(fixed_float32m1_t a, fixed_float32m1_t b) {
   return a * b;
@@ -1011,8 +1011,8 @@ fixed_float32m1_t mul_inplace_f32(fixed_float32m1_t a, fixed_float32m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <4 x double> @llvm.vector.extract.v4f64.nxv1f64(<vscale x 1 x double> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <4 x double> @llvm.vector.extract.v4f64.nxv1f64(<vscale x 1 x double> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[MUL:%.*]] = fmul <4 x double> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x double> @llvm.vector.insert.nxv1f64.v4f64(<vscale x 1 x double> undef, <4 x double> [[MUL]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x double> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x double> @llvm.vector.insert.nxv1f64.v4f64(<vscale x 1 x double> undef, <4 x double> [[MUL]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x double> [[CAST_SCALABLE]]
 //
 fixed_float64m1_t mul_inplace_f64(fixed_float64m1_t a, fixed_float64m1_t b) {
   return a * b;
@@ -1024,8 +1024,8 @@ fixed_float64m1_t mul_inplace_f64(fixed_float64m1_t a, fixed_float64m1_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i8> poison, i8 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i8> [[SPLAT_SPLATINSERT]], <32 x i8> poison, <32 x i32> zeroinitializer
 // CHECK-NEXT:    [[MUL:%.*]] = mul <32 x i8> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[MUL]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[MUL]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_int8m1_t mul_scalar_i8(fixed_int8m1_t a, int8_t b) {
   return a * b;
@@ -1037,8 +1037,8 @@ fixed_int8m1_t mul_scalar_i8(fixed_int8m1_t a, int8_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i16> poison, i16 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i16> [[SPLAT_SPLATINSERT]], <16 x i16> poison, <16 x i32> zeroinitializer
 // CHECK-NEXT:    [[MUL:%.*]] = mul <16 x i16> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[MUL]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[MUL]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_int16m1_t mul_scalar_i16(fixed_int16m1_t a, int16_t b) {
   return a * b;
@@ -1050,8 +1050,8 @@ fixed_int16m1_t mul_scalar_i16(fixed_int16m1_t a, int16_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i32> poison, i32 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT]], <8 x i32> poison, <8 x i32> zeroinitializer
 // CHECK-NEXT:    [[MUL:%.*]] = mul <8 x i32> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[MUL]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[MUL]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_int32m1_t mul_scalar_i32(fixed_int32m1_t a, int32_t b) {
   return a * b;
@@ -1063,8 +1063,8 @@ fixed_int32m1_t mul_scalar_i32(fixed_int32m1_t a, int32_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <4 x i64> [[SPLAT_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
 // CHECK-NEXT:    [[MUL:%.*]] = mul <4 x i64> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[MUL]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[MUL]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_int64m1_t mul_scalar_i64(fixed_int64m1_t a, int64_t b) {
   return a * b;
@@ -1076,8 +1076,8 @@ fixed_int64m1_t mul_scalar_i64(fixed_int64m1_t a, int64_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i8> poison, i8 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i8> [[SPLAT_SPLATINSERT]], <32 x i8> poison, <32 x i32> zeroinitializer
 // CHECK-NEXT:    [[MUL:%.*]] = mul <32 x i8> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[MUL]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[MUL]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_uint8m1_t mul_scalar_u8(fixed_uint8m1_t a, uint8_t b) {
   return a * b;
@@ -1089,8 +1089,8 @@ fixed_uint8m1_t mul_scalar_u8(fixed_uint8m1_t a, uint8_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i16> poison, i16 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i16> [[SPLAT_SPLATINSERT]], <16 x i16> poison, <16 x i32> zeroinitializer
 // CHECK-NEXT:    [[MUL:%.*]] = mul <16 x i16> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[MUL]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[MUL]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_uint16m1_t mul_scalar_u16(fixed_uint16m1_t a, uint16_t b) {
   return a * b;
@@ -1102,8 +1102,8 @@ fixed_uint16m1_t mul_scalar_u16(fixed_uint16m1_t a, uint16_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i32> poison, i32 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT]], <8 x i32> poison, <8 x i32> zeroinitializer
 // CHECK-NEXT:    [[MUL:%.*]] = mul <8 x i32> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[MUL]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[MUL]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_uint32m1_t mul_scalar_u32(fixed_uint32m1_t a, uint32_t b) {
   return a * b;
@@ -1115,8 +1115,8 @@ fixed_uint32m1_t mul_scalar_u32(fixed_uint32m1_t a, uint32_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <4 x i64> [[SPLAT_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
 // CHECK-NEXT:    [[MUL:%.*]] = mul <4 x i64> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[MUL]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[MUL]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_uint64m1_t mul_scalar_u64(fixed_uint64m1_t a, uint64_t b) {
   return a * b;
@@ -1128,8 +1128,8 @@ fixed_uint64m1_t mul_scalar_u64(fixed_uint64m1_t a, uint64_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x float> poison, float [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <8 x float> [[SPLAT_SPLATINSERT]], <8 x float> poison, <8 x i32> zeroinitializer
 // CHECK-NEXT:    [[MUL:%.*]] = fmul <8 x float> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x float> @llvm.vector.insert.nxv2f32.v8f32(<vscale x 2 x float> undef, <8 x float> [[MUL]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x float> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x float> @llvm.vector.insert.nxv2f32.v8f32(<vscale x 2 x float> undef, <8 x float> [[MUL]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x float> [[CAST_SCALABLE]]
 //
 fixed_float32m1_t mul_scalar_f32(fixed_float32m1_t a, float b) {
   return a * b;
@@ -1141,8 +1141,8 @@ fixed_float32m1_t mul_scalar_f32(fixed_float32m1_t a, float b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <4 x double> poison, double [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <4 x double> [[SPLAT_SPLATINSERT]], <4 x double> poison, <4 x i32> zeroinitializer
 // CHECK-NEXT:    [[MUL:%.*]] = fmul <4 x double> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x double> @llvm.vector.insert.nxv1f64.v4f64(<vscale x 1 x double> undef, <4 x double> [[MUL]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x double> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x double> @llvm.vector.insert.nxv1f64.v4f64(<vscale x 1 x double> undef, <4 x double> [[MUL]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x double> [[CAST_SCALABLE]]
 //
 fixed_float64m1_t mul_scalar_f64(fixed_float64m1_t a, double b) {
   return a * b;
@@ -1155,8 +1155,8 @@ fixed_float64m1_t mul_scalar_f64(fixed_float64m1_t a, double b) {
 // CHECK-NEXT:    [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[DIV:%.*]] = sdiv <32 x i8> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[DIV]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[DIV]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_int8m1_t div_i8(fixed_int8m1_t a, fixed_int8m1_t b) {
   return a / b;
@@ -1167,8 +1167,8 @@ fixed_int8m1_t div_i8(fixed_int8m1_t a, fixed_int8m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[DIV:%.*]] = sdiv <16 x i16> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[DIV]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[DIV]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_int16m1_t div_i16(fixed_int16m1_t a, fixed_int16m1_t b) {
   return a / b;
@@ -1179,8 +1179,8 @@ fixed_int16m1_t div_i16(fixed_int16m1_t a, fixed_int16m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[DIV:%.*]] = sdiv <8 x i32> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[DIV]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[DIV]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_int32m1_t div_i32(fixed_int32m1_t a, fixed_int32m1_t b) {
   return a / b;
@@ -1191,8 +1191,8 @@ fixed_int32m1_t div_i32(fixed_int32m1_t a, fixed_int32m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[DIV:%.*]] = sdiv <4 x i64> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[DIV]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[DIV]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_int64m1_t div_i64(fixed_int64m1_t a, fixed_int64m1_t b) {
   return a / b;
@@ -1203,8 +1203,8 @@ fixed_int64m1_t div_i64(fixed_int64m1_t a, fixed_int64m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[DIV:%.*]] = udiv <32 x i8> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[DIV]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[DIV]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_uint8m1_t div_u8(fixed_uint8m1_t a, fixed_uint8m1_t b) {
   return a / b;
@@ -1215,8 +1215,8 @@ fixed_uint8m1_t div_u8(fixed_uint8m1_t a, fixed_uint8m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[DIV:%.*]] = udiv <16 x i16> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[DIV]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[DIV]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_uint16m1_t div_u16(fixed_uint16m1_t a, fixed_uint16m1_t b) {
   return a / b;
@@ -1227,8 +1227,8 @@ fixed_uint16m1_t div_u16(fixed_uint16m1_t a, fixed_uint16m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[DIV:%.*]] = udiv <8 x i32> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[DIV]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[DIV]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_uint32m1_t div_u32(fixed_uint32m1_t a, fixed_uint32m1_t b) {
   return a / b;
@@ -1239,8 +1239,8 @@ fixed_uint32m1_t div_u32(fixed_uint32m1_t a, fixed_uint32m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[DIV:%.*]] = udiv <4 x i64> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[DIV]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[DIV]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_uint64m1_t div_u64(fixed_uint64m1_t a, fixed_uint64m1_t b) {
   return a / b;
@@ -1251,8 +1251,8 @@ fixed_uint64m1_t div_u64(fixed_uint64m1_t a, fixed_uint64m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv2f32(<vscale x 2 x float> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv2f32(<vscale x 2 x float> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[DIV:%.*]] = fdiv <8 x float> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x float> @llvm.vector.insert.nxv2f32.v8f32(<vscale x 2 x float> undef, <8 x float> [[DIV]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x float> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x float> @llvm.vector.insert.nxv2f32.v8f32(<vscale x 2 x float> undef, <8 x float> [[DIV]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x float> [[CAST_SCALABLE]]
 //
 fixed_float32m1_t div_f32(fixed_float32m1_t a, fixed_float32m1_t b) {
   return a / b;
@@ -1263,8 +1263,8 @@ fixed_float32m1_t div_f32(fixed_float32m1_t a, fixed_float32m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <4 x double> @llvm.vector.extract.v4f64.nxv1f64(<vscale x 1 x double> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <4 x double> @llvm.vector.extract.v4f64.nxv1f64(<vscale x 1 x double> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[DIV:%.*]] = fdiv <4 x double> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x double> @llvm.vector.insert.nxv1f64.v4f64(<vscale x 1 x double> undef, <4 x double> [[DIV]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x double> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x double> @llvm.vector.insert.nxv1f64.v4f64(<vscale x 1 x double> undef, <4 x double> [[DIV]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x double> [[CAST_SCALABLE]]
 //
 fixed_float64m1_t div_f64(fixed_float64m1_t a, fixed_float64m1_t b) {
   return a / b;
@@ -1275,8 +1275,8 @@ fixed_float64m1_t div_f64(fixed_float64m1_t a, fixed_float64m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[DIV:%.*]] = sdiv <32 x i8> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[DIV]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[DIV]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_int8m1_t div_inplace_i8(fixed_int8m1_t a, fixed_int8m1_t b) {
   return a / b;
@@ -1287,8 +1287,8 @@ fixed_int8m1_t div_inplace_i8(fixed_int8m1_t a, fixed_int8m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[DIV:%.*]] = sdiv <16 x i16> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[DIV]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[DIV]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_int16m1_t div_inplace_i16(fixed_int16m1_t a, fixed_int16m1_t b) {
   return a / b;
@@ -1299,8 +1299,8 @@ fixed_int16m1_t div_inplace_i16(fixed_int16m1_t a, fixed_int16m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[DIV:%.*]] = sdiv <8 x i32> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[DIV]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[DIV]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_int32m1_t div_inplace_i32(fixed_int32m1_t a, fixed_int32m1_t b) {
   return a / b;
@@ -1311,8 +1311,8 @@ fixed_int32m1_t div_inplace_i32(fixed_int32m1_t a, fixed_int32m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[DIV:%.*]] = sdiv <4 x i64> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[DIV]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[DIV]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_int64m1_t div_inplace_i64(fixed_int64m1_t a, fixed_int64m1_t b) {
   return a / b;
@@ -1323,8 +1323,8 @@ fixed_int64m1_t div_inplace_i64(fixed_int64m1_t a, fixed_int64m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[DIV:%.*]] = udiv <32 x i8> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[DIV]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[DIV]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_uint8m1_t div_inplace_u8(fixed_uint8m1_t a, fixed_uint8m1_t b) {
   return a / b;
@@ -1335,8 +1335,8 @@ fixed_uint8m1_t div_inplace_u8(fixed_uint8m1_t a, fixed_uint8m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[DIV:%.*]] = udiv <16 x i16> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[DIV]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[DIV]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_uint16m1_t div_inplace_u16(fixed_uint16m1_t a, fixed_uint16m1_t b) {
   return a / b;
@@ -1347,8 +1347,8 @@ fixed_uint16m1_t div_inplace_u16(fixed_uint16m1_t a, fixed_uint16m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[DIV:%.*]] = udiv <8 x i32> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[DIV]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[DIV]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_uint32m1_t div_inplace_u32(fixed_uint32m1_t a, fixed_uint32m1_t b) {
   return a / b;
@@ -1359,8 +1359,8 @@ fixed_uint32m1_t div_inplace_u32(fixed_uint32m1_t a, fixed_uint32m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[DIV:%.*]] = udiv <4 x i64> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[DIV]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[DIV]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_uint64m1_t div_inplace_u64(fixed_uint64m1_t a, fixed_uint64m1_t b) {
   return a / b;
@@ -1371,8 +1371,8 @@ fixed_uint64m1_t div_inplace_u64(fixed_uint64m1_t a, fixed_uint64m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv2f32(<vscale x 2 x float> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv2f32(<vscale x 2 x float> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[DIV:%.*]] = fdiv <8 x float> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x float> @llvm.vector.insert.nxv2f32.v8f32(<vscale x 2 x float> undef, <8 x float> [[DIV]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x float> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x float> @llvm.vector.insert.nxv2f32.v8f32(<vscale x 2 x float> undef, <8 x float> [[DIV]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x float> [[CAST_SCALABLE]]
 //
 fixed_float32m1_t div_inplace_f32(fixed_float32m1_t a, fixed_float32m1_t b) {
   return a / b;
@@ -1383,8 +1383,8 @@ fixed_float32m1_t div_inplace_f32(fixed_float32m1_t a, fixed_float32m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <4 x double> @llvm.vector.extract.v4f64.nxv1f64(<vscale x 1 x double> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <4 x double> @llvm.vector.extract.v4f64.nxv1f64(<vscale x 1 x double> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[DIV:%.*]] = fdiv <4 x double> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x double> @llvm.vector.insert.nxv1f64.v4f64(<vscale x 1 x double> undef, <4 x double> [[DIV]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x double> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x double> @llvm.vector.insert.nxv1f64.v4f64(<vscale x 1 x double> undef, <4 x double> [[DIV]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x double> [[CAST_SCALABLE]]
 //
 fixed_float64m1_t div_inplace_f64(fixed_float64m1_t a, fixed_float64m1_t b) {
   return a / b;
@@ -1396,8 +1396,8 @@ fixed_float64m1_t div_inplace_f64(fixed_float64m1_t a, fixed_float64m1_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i8> poison, i8 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i8> [[SPLAT_SPLATINSERT]], <32 x i8> poison, <32 x i32> zeroinitializer
 // CHECK-NEXT:    [[DIV:%.*]] = sdiv <32 x i8> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[DIV]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[DIV]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_int8m1_t div_scalar_i8(fixed_int8m1_t a, int8_t b) {
   return a / b;
@@ -1409,8 +1409,8 @@ fixed_int8m1_t div_scalar_i8(fixed_int8m1_t a, int8_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i16> poison, i16 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i16> [[SPLAT_SPLATINSERT]], <16 x i16> poison, <16 x i32> zeroinitializer
 // CHECK-NEXT:    [[DIV:%.*]] = sdiv <16 x i16> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[DIV]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[DIV]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_int16m1_t div_scalar_i16(fixed_int16m1_t a, int16_t b) {
   return a / b;
@@ -1422,8 +1422,8 @@ fixed_int16m1_t div_scalar_i16(fixed_int16m1_t a, int16_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i32> poison, i32 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT]], <8 x i32> poison, <8 x i32> zeroinitializer
 // CHECK-NEXT:    [[DIV:%.*]] = sdiv <8 x i32> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[DIV]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[DIV]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_int32m1_t div_scalar_i32(fixed_int32m1_t a, int32_t b) {
   return a / b;
@@ -1435,8 +1435,8 @@ fixed_int32m1_t div_scalar_i32(fixed_int32m1_t a, int32_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <4 x i64> [[SPLAT_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
 // CHECK-NEXT:    [[DIV:%.*]] = sdiv <4 x i64> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[DIV]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[DIV]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_int64m1_t div_scalar_i64(fixed_int64m1_t a, int64_t b) {
   return a / b;
@@ -1448,8 +1448,8 @@ fixed_int64m1_t div_scalar_i64(fixed_int64m1_t a, int64_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i8> poison, i8 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i8> [[SPLAT_SPLATINSERT]], <32 x i8> poison, <32 x i32> zeroinitializer
 // CHECK-NEXT:    [[DIV:%.*]] = udiv <32 x i8> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[DIV]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[DIV]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_uint8m1_t div_scalar_u8(fixed_uint8m1_t a, uint8_t b) {
   return a / b;
@@ -1461,8 +1461,8 @@ fixed_uint8m1_t div_scalar_u8(fixed_uint8m1_t a, uint8_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i16> poison, i16 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i16> [[SPLAT_SPLATINSERT]], <16 x i16> poison, <16 x i32> zeroinitializer
 // CHECK-NEXT:    [[DIV:%.*]] = udiv <16 x i16> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[DIV]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[DIV]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_uint16m1_t div_scalar_u16(fixed_uint16m1_t a, uint16_t b) {
   return a / b;
@@ -1474,8 +1474,8 @@ fixed_uint16m1_t div_scalar_u16(fixed_uint16m1_t a, uint16_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i32> poison, i32 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT]], <8 x i32> poison, <8 x i32> zeroinitializer
 // CHECK-NEXT:    [[DIV:%.*]] = udiv <8 x i32> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[DIV]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[DIV]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_uint32m1_t div_scalar_u32(fixed_uint32m1_t a, uint32_t b) {
   return a / b;
@@ -1487,8 +1487,8 @@ fixed_uint32m1_t div_scalar_u32(fixed_uint32m1_t a, uint32_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <4 x i64> [[SPLAT_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
 // CHECK-NEXT:    [[DIV:%.*]] = udiv <4 x i64> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[DIV]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[DIV]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_uint64m1_t div_scalar_u64(fixed_uint64m1_t a, uint64_t b) {
   return a / b;
@@ -1500,8 +1500,8 @@ fixed_uint64m1_t div_scalar_u64(fixed_uint64m1_t a, uint64_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x float> poison, float [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <8 x float> [[SPLAT_SPLATINSERT]], <8 x float> poison, <8 x i32> zeroinitializer
 // CHECK-NEXT:    [[DIV:%.*]] = fdiv <8 x float> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x float> @llvm.vector.insert.nxv2f32.v8f32(<vscale x 2 x float> undef, <8 x float> [[DIV]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x float> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x float> @llvm.vector.insert.nxv2f32.v8f32(<vscale x 2 x float> undef, <8 x float> [[DIV]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x float> [[CAST_SCALABLE]]
 //
 fixed_float32m1_t div_scalar_f32(fixed_float32m1_t a, float b) {
   return a / b;
@@ -1513,8 +1513,8 @@ fixed_float32m1_t div_scalar_f32(fixed_float32m1_t a, float b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <4 x double> poison, double [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <4 x double> [[SPLAT_SPLATINSERT]], <4 x double> poison, <4 x i32> zeroinitializer
 // CHECK-NEXT:    [[DIV:%.*]] = fdiv <4 x double> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x double> @llvm.vector.insert.nxv1f64.v4f64(<vscale x 1 x double> undef, <4 x double> [[DIV]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x double> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x double> @llvm.vector.insert.nxv1f64.v4f64(<vscale x 1 x double> undef, <4 x double> [[DIV]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x double> [[CAST_SCALABLE]]
 //
 fixed_float64m1_t div_scalar_f64(fixed_float64m1_t a, double b) {
   return a / b;
@@ -1527,8 +1527,8 @@ fixed_float64m1_t div_scalar_f64(fixed_float64m1_t a, double b) {
 // CHECK-NEXT:    [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[REM:%.*]] = srem <32 x i8> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[REM]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[REM]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_int8m1_t rem_i8(fixed_int8m1_t a, fixed_int8m1_t b) {
   return a % b;
@@ -1539,8 +1539,8 @@ fixed_int8m1_t rem_i8(fixed_int8m1_t a, fixed_int8m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[REM:%.*]] = srem <16 x i16> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[REM]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[REM]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_int16m1_t rem_i16(fixed_int16m1_t a, fixed_int16m1_t b) {
   return a % b;
@@ -1551,8 +1551,8 @@ fixed_int16m1_t rem_i16(fixed_int16m1_t a, fixed_int16m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[REM:%.*]] = srem <8 x i32> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[REM]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[REM]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_int32m1_t rem_i32(fixed_int32m1_t a, fixed_int32m1_t b) {
   return a % b;
@@ -1563,8 +1563,8 @@ fixed_int32m1_t rem_i32(fixed_int32m1_t a, fixed_int32m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[REM:%.*]] = srem <4 x i64> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[REM]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[REM]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_int64m1_t rem_i64(fixed_int64m1_t a, fixed_int64m1_t b) {
   return a % b;
@@ -1575,8 +1575,8 @@ fixed_int64m1_t rem_i64(fixed_int64m1_t a, fixed_int64m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[REM:%.*]] = urem <32 x i8> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[REM]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[REM]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_uint8m1_t rem_u8(fixed_uint8m1_t a, fixed_uint8m1_t b) {
   return a % b;
@@ -1587,8 +1587,8 @@ fixed_uint8m1_t rem_u8(fixed_uint8m1_t a, fixed_uint8m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[REM:%.*]] = urem <16 x i16> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[REM]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[REM]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_uint16m1_t rem_u16(fixed_uint16m1_t a, fixed_uint16m1_t b) {
   return a % b;
@@ -1599,8 +1599,8 @@ fixed_uint16m1_t rem_u16(fixed_uint16m1_t a, fixed_uint16m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[REM:%.*]] = urem <8 x i32> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[REM]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[REM]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_uint32m1_t rem_u32(fixed_uint32m1_t a, fixed_uint32m1_t b) {
   return a % b;
@@ -1611,8 +1611,8 @@ fixed_uint32m1_t rem_u32(fixed_uint32m1_t a, fixed_uint32m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[REM:%.*]] = urem <4 x i64> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[REM]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[REM]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_uint64m1_t rem_u64(fixed_uint64m1_t a, fixed_uint64m1_t b) {
   return a % b;
@@ -1623,8 +1623,8 @@ fixed_uint64m1_t rem_u64(fixed_uint64m1_t a, fixed_uint64m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[REM:%.*]] = srem <32 x i8> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[REM]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[REM]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_int8m1_t rem_inplace_i8(fixed_int8m1_t a, fixed_int8m1_t b) {
   return a % b;
@@ -1635,8 +1635,8 @@ fixed_int8m1_t rem_inplace_i8(fixed_int8m1_t a, fixed_int8m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[REM:%.*]] = srem <16 x i16> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[REM]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[REM]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_int16m1_t rem_inplace_i16(fixed_int16m1_t a, fixed_int16m1_t b) {
   return a % b;
@@ -1647,8 +1647,8 @@ fixed_int16m1_t rem_inplace_i16(fixed_int16m1_t a, fixed_int16m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[REM:%.*]] = srem <8 x i32> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[REM]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[REM]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_int32m1_t rem_inplace_i32(fixed_int32m1_t a, fixed_int32m1_t b) {
   return a % b;
@@ -1659,8 +1659,8 @@ fixed_int32m1_t rem_inplace_i32(fixed_int32m1_t a, fixed_int32m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[REM:%.*]] = srem <4 x i64> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[REM]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[REM]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_int64m1_t rem_inplace_i64(fixed_int64m1_t a, fixed_int64m1_t b) {
   return a % b;
@@ -1671,8 +1671,8 @@ fixed_int64m1_t rem_inplace_i64(fixed_int64m1_t a, fixed_int64m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[REM:%.*]] = urem <32 x i8> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[REM]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[REM]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_uint8m1_t rem_inplace_u8(fixed_uint8m1_t a, fixed_uint8m1_t b) {
   return a % b;
@@ -1683,8 +1683,8 @@ fixed_uint8m1_t rem_inplace_u8(fixed_uint8m1_t a, fixed_uint8m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[REM:%.*]] = urem <16 x i16> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[REM]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[REM]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_uint16m1_t rem_inplace_u16(fixed_uint16m1_t a, fixed_uint16m1_t b) {
   return a % b;
@@ -1695,8 +1695,8 @@ fixed_uint16m1_t rem_inplace_u16(fixed_uint16m1_t a, fixed_uint16m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[REM:%.*]] = urem <8 x i32> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[REM]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[REM]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_uint32m1_t rem_inplace_u32(fixed_uint32m1_t a, fixed_uint32m1_t b) {
   return a % b;
@@ -1707,8 +1707,8 @@ fixed_uint32m1_t rem_inplace_u32(fixed_uint32m1_t a, fixed_uint32m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[REM:%.*]] = urem <4 x i64> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[REM]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[REM]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_uint64m1_t rem_inplace_u64(fixed_uint64m1_t a, fixed_uint64m1_t b) {
   return a % b;
@@ -1720,8 +1720,8 @@ fixed_uint64m1_t rem_inplace_u64(fixed_uint64m1_t a, fixed_uint64m1_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i8> poison, i8 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i8> [[SPLAT_SPLATINSERT]], <32 x i8> poison, <32 x i32> zeroinitializer
 // CHECK-NEXT:    [[REM:%.*]] = srem <32 x i8> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[REM]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[REM]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_int8m1_t rem_scalar_i8(fixed_int8m1_t a, int8_t b) {
   return a % b;
@@ -1733,8 +1733,8 @@ fixed_int8m1_t rem_scalar_i8(fixed_int8m1_t a, int8_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i16> poison, i16 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i16> [[SPLAT_SPLATINSERT]], <16 x i16> poison, <16 x i32> zeroinitializer
 // CHECK-NEXT:    [[REM:%.*]] = srem <16 x i16> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[REM]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[REM]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_int16m1_t rem_scalar_i16(fixed_int16m1_t a, int16_t b) {
   return a % b;
@@ -1746,8 +1746,8 @@ fixed_int16m1_t rem_scalar_i16(fixed_int16m1_t a, int16_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i32> poison, i32 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT]], <8 x i32> poison, <8 x i32> zeroinitializer
 // CHECK-NEXT:    [[REM:%.*]] = srem <8 x i32> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[REM]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[REM]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_int32m1_t rem_scalar_i32(fixed_int32m1_t a, int32_t b) {
   return a % b;
@@ -1759,8 +1759,8 @@ fixed_int32m1_t rem_scalar_i32(fixed_int32m1_t a, int32_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <4 x i64> [[SPLAT_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
 // CHECK-NEXT:    [[REM:%.*]] = srem <4 x i64> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[REM]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[REM]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_int64m1_t rem_scalar_i64(fixed_int64m1_t a, int64_t b) {
   return a % b;
@@ -1772,8 +1772,8 @@ fixed_int64m1_t rem_scalar_i64(fixed_int64m1_t a, int64_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i8> poison, i8 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i8> [[SPLAT_SPLATINSERT]], <32 x i8> poison, <32 x i32> zeroinitializer
 // CHECK-NEXT:    [[REM:%.*]] = urem <32 x i8> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[REM]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[REM]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_uint8m1_t rem_scalar_u8(fixed_uint8m1_t a, uint8_t b) {
   return a % b;
@@ -1785,8 +1785,8 @@ fixed_uint8m1_t rem_scalar_u8(fixed_uint8m1_t a, uint8_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i16> poison, i16 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i16> [[SPLAT_SPLATINSERT]], <16 x i16> poison, <16 x i32> zeroinitializer
 // CHECK-NEXT:    [[REM:%.*]] = urem <16 x i16> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[REM]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[REM]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_uint16m1_t rem_scalar_u16(fixed_uint16m1_t a, uint16_t b) {
   return a % b;
@@ -1798,8 +1798,8 @@ fixed_uint16m1_t rem_scalar_u16(fixed_uint16m1_t a, uint16_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i32> poison, i32 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT]], <8 x i32> poison, <8 x i32> zeroinitializer
 // CHECK-NEXT:    [[REM:%.*]] = urem <8 x i32> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[REM]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[REM]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_uint32m1_t rem_scalar_u32(fixed_uint32m1_t a, uint32_t b) {
   return a % b;
@@ -1811,8 +1811,8 @@ fixed_uint32m1_t rem_scalar_u32(fixed_uint32m1_t a, uint32_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <4 x i64> [[SPLAT_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
 // CHECK-NEXT:    [[REM:%.*]] = urem <4 x i64> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[REM]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[REM]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_uint64m1_t rem_scalar_u64(fixed_uint64m1_t a, uint64_t b) {
   return a % b;

diff --git a/clang/test/CodeGen/riscv-rvv-vls-bitwise-ops.c b/clang/test/CodeGen/riscv-rvv-vls-bitwise-ops.c
index 9821e26ce0bf8c..bb97707a7a9a13 100644
--- a/clang/test/CodeGen/riscv-rvv-vls-bitwise-ops.c
+++ b/clang/test/CodeGen/riscv-rvv-vls-bitwise-ops.c
@@ -39,8 +39,8 @@ typedef vfloat64m1_t fixed_float64m1_t __attribute__((riscv_rvv_vector_bits(__ri
 // CHECK-NEXT:    [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[AND:%.*]] = and <32 x i8> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[AND]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[AND]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_int8m1_t and_i8(fixed_int8m1_t a, fixed_int8m1_t b) {
   return a & b;
@@ -51,8 +51,8 @@ fixed_int8m1_t and_i8(fixed_int8m1_t a, fixed_int8m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[AND:%.*]] = and <16 x i16> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[AND]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[AND]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_int16m1_t and_i16(fixed_int16m1_t a, fixed_int16m1_t b) {
   return a & b;
@@ -63,8 +63,8 @@ fixed_int16m1_t and_i16(fixed_int16m1_t a, fixed_int16m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[AND:%.*]] = and <8 x i32> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[AND]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[AND]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_int32m1_t and_i32(fixed_int32m1_t a, fixed_int32m1_t b) {
   return a & b;
@@ -75,8 +75,8 @@ fixed_int32m1_t and_i32(fixed_int32m1_t a, fixed_int32m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[AND:%.*]] = and <4 x i64> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[AND]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[AND]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_int64m1_t and_i64(fixed_int64m1_t a, fixed_int64m1_t b) {
   return a & b;
@@ -87,8 +87,8 @@ fixed_int64m1_t and_i64(fixed_int64m1_t a, fixed_int64m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[AND:%.*]] = and <32 x i8> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[AND]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[AND]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_uint8m1_t and_u8(fixed_uint8m1_t a, fixed_uint8m1_t b) {
   return a & b;
@@ -99,8 +99,8 @@ fixed_uint8m1_t and_u8(fixed_uint8m1_t a, fixed_uint8m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[AND:%.*]] = and <16 x i16> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[AND]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[AND]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_uint16m1_t and_u16(fixed_uint16m1_t a, fixed_uint16m1_t b) {
   return a & b;
@@ -111,8 +111,8 @@ fixed_uint16m1_t and_u16(fixed_uint16m1_t a, fixed_uint16m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[AND:%.*]] = and <8 x i32> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[AND]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[AND]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_uint32m1_t and_u32(fixed_uint32m1_t a, fixed_uint32m1_t b) {
   return a & b;
@@ -123,8 +123,8 @@ fixed_uint32m1_t and_u32(fixed_uint32m1_t a, fixed_uint32m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[AND:%.*]] = and <4 x i64> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[AND]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[AND]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_uint64m1_t and_u64(fixed_uint64m1_t a, fixed_uint64m1_t b) {
   return a & b;
@@ -137,8 +137,8 @@ fixed_uint64m1_t and_u64(fixed_uint64m1_t a, fixed_uint64m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[OR:%.*]] = or <32 x i8> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[OR]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[OR]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_int8m1_t or_i8(fixed_int8m1_t a, fixed_int8m1_t b) {
   return a | b;
@@ -149,8 +149,8 @@ fixed_int8m1_t or_i8(fixed_int8m1_t a, fixed_int8m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[OR:%.*]] = or <16 x i16> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[OR]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[OR]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_int16m1_t or_i16(fixed_int16m1_t a, fixed_int16m1_t b) {
   return a | b;
@@ -161,8 +161,8 @@ fixed_int16m1_t or_i16(fixed_int16m1_t a, fixed_int16m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[OR:%.*]] = or <8 x i32> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[OR]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[OR]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_int32m1_t or_i32(fixed_int32m1_t a, fixed_int32m1_t b) {
   return a | b;
@@ -173,8 +173,8 @@ fixed_int32m1_t or_i32(fixed_int32m1_t a, fixed_int32m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[OR:%.*]] = or <4 x i64> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[OR]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[OR]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_int64m1_t or_i64(fixed_int64m1_t a, fixed_int64m1_t b) {
   return a | b;
@@ -185,8 +185,8 @@ fixed_int64m1_t or_i64(fixed_int64m1_t a, fixed_int64m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[OR:%.*]] = or <32 x i8> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[OR]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[OR]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_uint8m1_t or_u8(fixed_uint8m1_t a, fixed_uint8m1_t b) {
   return a | b;
@@ -197,8 +197,8 @@ fixed_uint8m1_t or_u8(fixed_uint8m1_t a, fixed_uint8m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[OR:%.*]] = or <16 x i16> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[OR]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[OR]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_uint16m1_t or_u16(fixed_uint16m1_t a, fixed_uint16m1_t b) {
   return a | b;
@@ -209,8 +209,8 @@ fixed_uint16m1_t or_u16(fixed_uint16m1_t a, fixed_uint16m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[OR:%.*]] = or <8 x i32> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[OR]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[OR]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_uint32m1_t or_u32(fixed_uint32m1_t a, fixed_uint32m1_t b) {
   return a | b;
@@ -221,8 +221,8 @@ fixed_uint32m1_t or_u32(fixed_uint32m1_t a, fixed_uint32m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[OR:%.*]] = or <4 x i64> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[OR]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[OR]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_uint64m1_t or_u64(fixed_uint64m1_t a, fixed_uint64m1_t b) {
   return a | b;
@@ -235,8 +235,8 @@ fixed_uint64m1_t or_u64(fixed_uint64m1_t a, fixed_uint64m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[XOR:%.*]] = xor <32 x i8> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[XOR]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[XOR]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_int8m1_t xor_i8(fixed_int8m1_t a, fixed_int8m1_t b) {
   return a ^ b;
@@ -247,8 +247,8 @@ fixed_int8m1_t xor_i8(fixed_int8m1_t a, fixed_int8m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[XOR:%.*]] = xor <16 x i16> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[XOR]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[XOR]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_int16m1_t xor_i16(fixed_int16m1_t a, fixed_int16m1_t b) {
   return a ^ b;
@@ -259,8 +259,8 @@ fixed_int16m1_t xor_i16(fixed_int16m1_t a, fixed_int16m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[XOR:%.*]] = xor <8 x i32> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[XOR]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[XOR]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_int32m1_t xor_i32(fixed_int32m1_t a, fixed_int32m1_t b) {
   return a ^ b;
@@ -271,8 +271,8 @@ fixed_int32m1_t xor_i32(fixed_int32m1_t a, fixed_int32m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[XOR:%.*]] = xor <4 x i64> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[XOR]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[XOR]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_int64m1_t xor_i64(fixed_int64m1_t a, fixed_int64m1_t b) {
   return a ^ b;
@@ -283,8 +283,8 @@ fixed_int64m1_t xor_i64(fixed_int64m1_t a, fixed_int64m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[XOR:%.*]] = xor <32 x i8> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[XOR]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[XOR]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_uint8m1_t xor_u8(fixed_uint8m1_t a, fixed_uint8m1_t b) {
   return a ^ b;
@@ -295,8 +295,8 @@ fixed_uint8m1_t xor_u8(fixed_uint8m1_t a, fixed_uint8m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[XOR:%.*]] = xor <16 x i16> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[XOR]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[XOR]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_uint16m1_t xor_u16(fixed_uint16m1_t a, fixed_uint16m1_t b) {
   return a ^ b;
@@ -307,8 +307,8 @@ fixed_uint16m1_t xor_u16(fixed_uint16m1_t a, fixed_uint16m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[XOR:%.*]] = xor <8 x i32> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[XOR]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[XOR]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_uint32m1_t xor_u32(fixed_uint32m1_t a, fixed_uint32m1_t b) {
   return a ^ b;
@@ -319,8 +319,8 @@ fixed_uint32m1_t xor_u32(fixed_uint32m1_t a, fixed_uint32m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[XOR:%.*]] = xor <4 x i64> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[XOR]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[XOR]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_uint64m1_t xor_u64(fixed_uint64m1_t a, fixed_uint64m1_t b) {
   return a ^ b;
@@ -332,8 +332,8 @@ fixed_uint64m1_t xor_u64(fixed_uint64m1_t a, fixed_uint64m1_t b) {
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[NOT:%.*]] = xor <32 x i8> [[A]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[NOT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[NOT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_int8m1_t not_i8(fixed_int8m1_t a) {
   return ~a;
@@ -343,8 +343,8 @@ fixed_int8m1_t not_i8(fixed_int8m1_t a) {
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[NOT:%.*]] = xor <16 x i16> [[A]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[NOT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[NOT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_int16m1_t not_i16(fixed_int16m1_t a) {
   return ~a;
@@ -354,8 +354,8 @@ fixed_int16m1_t not_i16(fixed_int16m1_t a) {
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[NOT:%.*]] = xor <8 x i32> [[A]], <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[NOT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[NOT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_int32m1_t not_i32(fixed_int32m1_t a) {
   return ~a;
@@ -365,8 +365,8 @@ fixed_int32m1_t not_i32(fixed_int32m1_t a) {
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[NOT:%.*]] = xor <4 x i64> [[A]], <i64 -1, i64 -1, i64 -1, i64 -1>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[NOT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[NOT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_int64m1_t not_i64(fixed_int64m1_t a) {
   return ~a;
@@ -376,8 +376,8 @@ fixed_int64m1_t not_i64(fixed_int64m1_t a) {
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[NOT:%.*]] = xor <32 x i8> [[A]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[NOT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[NOT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_uint8m1_t not_u8(fixed_uint8m1_t a) {
   return ~a;
@@ -387,8 +387,8 @@ fixed_uint8m1_t not_u8(fixed_uint8m1_t a) {
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[NOT:%.*]] = xor <16 x i16> [[A]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[NOT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[NOT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_uint16m1_t not_u16(fixed_uint16m1_t a) {
   return ~a;
@@ -398,8 +398,8 @@ fixed_uint16m1_t not_u16(fixed_uint16m1_t a) {
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[NOT:%.*]] = xor <8 x i32> [[A]], <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[NOT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[NOT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_uint32m1_t not_u32(fixed_uint32m1_t a) {
   return ~a;
@@ -409,8 +409,8 @@ fixed_uint32m1_t not_u32(fixed_uint32m1_t a) {
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[NOT:%.*]] = xor <4 x i64> [[A]], <i64 -1, i64 -1, i64 -1, i64 -1>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[NOT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[NOT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_uint64m1_t not_u64(fixed_uint64m1_t a) {
   return ~a;

diff --git a/clang/test/CodeGen/riscv-rvv-vls-compare-ops.c b/clang/test/CodeGen/riscv-rvv-vls-compare-ops.c
index 39bc764a3c570c..f22b4fbb5251fd 100644
--- a/clang/test/CodeGen/riscv-rvv-vls-compare-ops.c
+++ b/clang/test/CodeGen/riscv-rvv-vls-compare-ops.c
@@ -40,8 +40,8 @@ typedef vfloat64m1_t fixed_float64m1_t __attribute__((riscv_rvv_vector_bits(__ri
 // CHECK-NEXT:    [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[CMP:%.*]] = icmp eq <32 x i8> [[A]], [[B]]
 // CHECK-NEXT:    [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i8>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SEXT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SEXT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_int8m1_t eq_i8(fixed_int8m1_t a, fixed_int8m1_t b) {
   return a == b;
@@ -53,8 +53,8 @@ fixed_int8m1_t eq_i8(fixed_int8m1_t a, fixed_int8m1_t b) {
 // CHECK-NEXT:    [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[CMP:%.*]] = icmp eq <16 x i16> [[A]], [[B]]
 // CHECK-NEXT:    [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i16>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SEXT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SEXT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_int16m1_t eq_i16(fixed_int16m1_t a, fixed_int16m1_t b) {
   return a == b;
@@ -66,8 +66,8 @@ fixed_int16m1_t eq_i16(fixed_int16m1_t a, fixed_int16m1_t b) {
 // CHECK-NEXT:    [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[CMP:%.*]] = icmp eq <8 x i32> [[A]], [[B]]
 // CHECK-NEXT:    [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i32>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SEXT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SEXT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_int32m1_t eq_i32(fixed_int32m1_t a, fixed_int32m1_t b) {
   return a == b;
@@ -79,8 +79,8 @@ fixed_int32m1_t eq_i32(fixed_int32m1_t a, fixed_int32m1_t b) {
 // CHECK-NEXT:    [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[CMP:%.*]] = icmp eq <4 x i64> [[A]], [[B]]
 // CHECK-NEXT:    [[SEXT:%.*]] = sext <4 x i1> [[CMP]] to <4 x i64>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SEXT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SEXT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_int64m1_t eq_i64(fixed_int64m1_t a, fixed_int64m1_t b) {
   return a == b;
@@ -92,8 +92,8 @@ fixed_int64m1_t eq_i64(fixed_int64m1_t a, fixed_int64m1_t b) {
 // CHECK-NEXT:    [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[CMP:%.*]] = icmp eq <32 x i8> [[A]], [[B]]
 // CHECK-NEXT:    [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i8>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SEXT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SEXT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_int8m1_t eq_u8(fixed_uint8m1_t a, fixed_uint8m1_t b) {
   return a == b;
@@ -105,8 +105,8 @@ fixed_int8m1_t eq_u8(fixed_uint8m1_t a, fixed_uint8m1_t b) {
 // CHECK-NEXT:    [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[CMP:%.*]] = icmp eq <16 x i16> [[A]], [[B]]
 // CHECK-NEXT:    [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i16>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SEXT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SEXT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_int16m1_t eq_u16(fixed_uint16m1_t a, fixed_uint16m1_t b) {
   return a == b;
@@ -118,8 +118,8 @@ fixed_int16m1_t eq_u16(fixed_uint16m1_t a, fixed_uint16m1_t b) {
 // CHECK-NEXT:    [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[CMP:%.*]] = icmp eq <8 x i32> [[A]], [[B]]
 // CHECK-NEXT:    [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i32>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SEXT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SEXT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_int32m1_t eq_u32(fixed_uint32m1_t a, fixed_uint32m1_t b) {
   return a == b;
@@ -131,8 +131,8 @@ fixed_int32m1_t eq_u32(fixed_uint32m1_t a, fixed_uint32m1_t b) {
 // CHECK-NEXT:    [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[CMP:%.*]] = icmp eq <4 x i64> [[A]], [[B]]
 // CHECK-NEXT:    [[SEXT:%.*]] = sext <4 x i1> [[CMP]] to <4 x i64>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SEXT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SEXT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_int64m1_t eq_u64(fixed_uint64m1_t a, fixed_uint64m1_t b) {
   return a == b;
@@ -144,8 +144,8 @@ fixed_int64m1_t eq_u64(fixed_uint64m1_t a, fixed_uint64m1_t b) {
 // CHECK-NEXT:    [[B:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv2f32(<vscale x 2 x float> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[CMP:%.*]] = fcmp oeq <8 x float> [[A]], [[B]]
 // CHECK-NEXT:    [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i32>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SEXT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SEXT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_int32m1_t eq_f32(fixed_float32m1_t a, fixed_float32m1_t b) {
   return a == b;
@@ -157,8 +157,8 @@ fixed_int32m1_t eq_f32(fixed_float32m1_t a, fixed_float32m1_t b) {
 // CHECK-NEXT:    [[B:%.*]] = call <4 x double> @llvm.vector.extract.v4f64.nxv1f64(<vscale x 1 x double> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[CMP:%.*]] = fcmp oeq <4 x double> [[A]], [[B]]
 // CHECK-NEXT:    [[SEXT:%.*]] = sext <4 x i1> [[CMP]] to <4 x i64>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SEXT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SEXT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_int64m1_t eq_f64(fixed_float64m1_t a, fixed_float64m1_t b) {
   return a == b;
@@ -172,8 +172,8 @@ fixed_int64m1_t eq_f64(fixed_float64m1_t a, fixed_float64m1_t b) {
 // CHECK-NEXT:    [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[CMP:%.*]] = icmp ne <32 x i8> [[A]], [[B]]
 // CHECK-NEXT:    [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i8>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SEXT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SEXT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_int8m1_t neq_i8(fixed_int8m1_t a, fixed_int8m1_t b) {
   return a != b;
@@ -185,8 +185,8 @@ fixed_int8m1_t neq_i8(fixed_int8m1_t a, fixed_int8m1_t b) {
 // CHECK-NEXT:    [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[CMP:%.*]] = icmp ne <16 x i16> [[A]], [[B]]
 // CHECK-NEXT:    [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i16>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SEXT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SEXT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_int16m1_t neq_i16(fixed_int16m1_t a, fixed_int16m1_t b) {
   return a != b;
@@ -198,8 +198,8 @@ fixed_int16m1_t neq_i16(fixed_int16m1_t a, fixed_int16m1_t b) {
 // CHECK-NEXT:    [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[CMP:%.*]] = icmp ne <8 x i32> [[A]], [[B]]
 // CHECK-NEXT:    [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i32>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SEXT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SEXT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_int32m1_t neq_i32(fixed_int32m1_t a, fixed_int32m1_t b) {
   return a != b;
@@ -211,8 +211,8 @@ fixed_int32m1_t neq_i32(fixed_int32m1_t a, fixed_int32m1_t b) {
 // CHECK-NEXT:    [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[CMP:%.*]] = icmp ne <4 x i64> [[A]], [[B]]
 // CHECK-NEXT:    [[SEXT:%.*]] = sext <4 x i1> [[CMP]] to <4 x i64>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SEXT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SEXT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_int64m1_t neq_i64(fixed_int64m1_t a, fixed_int64m1_t b) {
   return a != b;
@@ -224,8 +224,8 @@ fixed_int64m1_t neq_i64(fixed_int64m1_t a, fixed_int64m1_t b) {
 // CHECK-NEXT:    [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[CMP:%.*]] = icmp ne <32 x i8> [[A]], [[B]]
 // CHECK-NEXT:    [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i8>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SEXT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SEXT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_int8m1_t neq_u8(fixed_uint8m1_t a, fixed_uint8m1_t b) {
   return a != b;
@@ -237,8 +237,8 @@ fixed_int8m1_t neq_u8(fixed_uint8m1_t a, fixed_uint8m1_t b) {
 // CHECK-NEXT:    [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[CMP:%.*]] = icmp ne <16 x i16> [[A]], [[B]]
 // CHECK-NEXT:    [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i16>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SEXT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SEXT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_int16m1_t neq_u16(fixed_uint16m1_t a, fixed_uint16m1_t b) {
   return a != b;
@@ -250,8 +250,8 @@ fixed_int16m1_t neq_u16(fixed_uint16m1_t a, fixed_uint16m1_t b) {
 // CHECK-NEXT:    [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[CMP:%.*]] = icmp ne <8 x i32> [[A]], [[B]]
 // CHECK-NEXT:    [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i32>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SEXT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SEXT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_int32m1_t neq_u32(fixed_uint32m1_t a, fixed_uint32m1_t b) {
   return a != b;
@@ -263,8 +263,8 @@ fixed_int32m1_t neq_u32(fixed_uint32m1_t a, fixed_uint32m1_t b) {
 // CHECK-NEXT:    [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[CMP:%.*]] = icmp ne <4 x i64> [[A]], [[B]]
 // CHECK-NEXT:    [[SEXT:%.*]] = sext <4 x i1> [[CMP]] to <4 x i64>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SEXT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SEXT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_int64m1_t neq_u64(fixed_uint64m1_t a, fixed_uint64m1_t b) {
   return a != b;
@@ -276,8 +276,8 @@ fixed_int64m1_t neq_u64(fixed_uint64m1_t a, fixed_uint64m1_t b) {
 // CHECK-NEXT:    [[B:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv2f32(<vscale x 2 x float> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[CMP:%.*]] = fcmp une <8 x float> [[A]], [[B]]
 // CHECK-NEXT:    [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i32>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SEXT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SEXT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_int32m1_t neq_f32(fixed_float32m1_t a, fixed_float32m1_t b) {
   return a != b;
@@ -289,8 +289,8 @@ fixed_int32m1_t neq_f32(fixed_float32m1_t a, fixed_float32m1_t b) {
 // CHECK-NEXT:    [[B:%.*]] = call <4 x double> @llvm.vector.extract.v4f64.nxv1f64(<vscale x 1 x double> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[CMP:%.*]] = fcmp une <4 x double> [[A]], [[B]]
 // CHECK-NEXT:    [[SEXT:%.*]] = sext <4 x i1> [[CMP]] to <4 x i64>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SEXT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SEXT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_int64m1_t neq_f64(fixed_float64m1_t a, fixed_float64m1_t b) {
   return a != b;
@@ -304,8 +304,8 @@ fixed_int64m1_t neq_f64(fixed_float64m1_t a, fixed_float64m1_t b) {
 // CHECK-NEXT:    [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[CMP:%.*]] = icmp slt <32 x i8> [[A]], [[B]]
 // CHECK-NEXT:    [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i8>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SEXT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SEXT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_int8m1_t lt_i8(fixed_int8m1_t a, fixed_int8m1_t b) {
   return a < b;
@@ -317,8 +317,8 @@ fixed_int8m1_t lt_i8(fixed_int8m1_t a, fixed_int8m1_t b) {
 // CHECK-NEXT:    [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[CMP:%.*]] = icmp slt <16 x i16> [[A]], [[B]]
 // CHECK-NEXT:    [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i16>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SEXT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SEXT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_int16m1_t lt_i16(fixed_int16m1_t a, fixed_int16m1_t b) {
   return a < b;
@@ -330,8 +330,8 @@ fixed_int16m1_t lt_i16(fixed_int16m1_t a, fixed_int16m1_t b) {
 // CHECK-NEXT:    [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[CMP:%.*]] = icmp slt <8 x i32> [[A]], [[B]]
 // CHECK-NEXT:    [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i32>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SEXT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SEXT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_int32m1_t lt_i32(fixed_int32m1_t a, fixed_int32m1_t b) {
   return a < b;
@@ -343,8 +343,8 @@ fixed_int32m1_t lt_i32(fixed_int32m1_t a, fixed_int32m1_t b) {
 // CHECK-NEXT:    [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[CMP:%.*]] = icmp slt <4 x i64> [[A]], [[B]]
 // CHECK-NEXT:    [[SEXT:%.*]] = sext <4 x i1> [[CMP]] to <4 x i64>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SEXT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SEXT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_int64m1_t lt_i64(fixed_int64m1_t a, fixed_int64m1_t b) {
   return a < b;
@@ -356,8 +356,8 @@ fixed_int64m1_t lt_i64(fixed_int64m1_t a, fixed_int64m1_t b) {
 // CHECK-NEXT:    [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[CMP:%.*]] = icmp ult <32 x i8> [[A]], [[B]]
 // CHECK-NEXT:    [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i8>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SEXT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SEXT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_int8m1_t lt_u8(fixed_uint8m1_t a, fixed_uint8m1_t b) {
   return a < b;
@@ -369,8 +369,8 @@ fixed_int8m1_t lt_u8(fixed_uint8m1_t a, fixed_uint8m1_t b) {
 // CHECK-NEXT:    [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[CMP:%.*]] = icmp ult <16 x i16> [[A]], [[B]]
 // CHECK-NEXT:    [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i16>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SEXT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SEXT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_int16m1_t lt_u16(fixed_uint16m1_t a, fixed_uint16m1_t b) {
   return a < b;
@@ -382,8 +382,8 @@ fixed_int16m1_t lt_u16(fixed_uint16m1_t a, fixed_uint16m1_t b) {
 // CHECK-NEXT:    [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[CMP:%.*]] = icmp ult <8 x i32> [[A]], [[B]]
 // CHECK-NEXT:    [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i32>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SEXT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SEXT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_int32m1_t lt_u32(fixed_uint32m1_t a, fixed_uint32m1_t b) {
   return a < b;
@@ -395,8 +395,8 @@ fixed_int32m1_t lt_u32(fixed_uint32m1_t a, fixed_uint32m1_t b) {
 // CHECK-NEXT:    [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[CMP:%.*]] = icmp ult <4 x i64> [[A]], [[B]]
 // CHECK-NEXT:    [[SEXT:%.*]] = sext <4 x i1> [[CMP]] to <4 x i64>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SEXT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SEXT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_int64m1_t lt_u64(fixed_uint64m1_t a, fixed_uint64m1_t b) {
   return a < b;
@@ -408,8 +408,8 @@ fixed_int64m1_t lt_u64(fixed_uint64m1_t a, fixed_uint64m1_t b) {
 // CHECK-NEXT:    [[B:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv2f32(<vscale x 2 x float> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[CMP:%.*]] = fcmp olt <8 x float> [[A]], [[B]]
 // CHECK-NEXT:    [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i32>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SEXT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SEXT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_int32m1_t lt_f32(fixed_float32m1_t a, fixed_float32m1_t b) {
   return a < b;
@@ -421,8 +421,8 @@ fixed_int32m1_t lt_f32(fixed_float32m1_t a, fixed_float32m1_t b) {
 // CHECK-NEXT:    [[B:%.*]] = call <4 x double> @llvm.vector.extract.v4f64.nxv1f64(<vscale x 1 x double> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[CMP:%.*]] = fcmp olt <4 x double> [[A]], [[B]]
 // CHECK-NEXT:    [[SEXT:%.*]] = sext <4 x i1> [[CMP]] to <4 x i64>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SEXT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SEXT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_int64m1_t lt_f64(fixed_float64m1_t a, fixed_float64m1_t b) {
   return a < b;
@@ -436,8 +436,8 @@ fixed_int64m1_t lt_f64(fixed_float64m1_t a, fixed_float64m1_t b) {
 // CHECK-NEXT:    [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[CMP:%.*]] = icmp sle <32 x i8> [[A]], [[B]]
 // CHECK-NEXT:    [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i8>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SEXT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SEXT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_int8m1_t leq_i8(fixed_int8m1_t a, fixed_int8m1_t b) {
   return a <= b;
@@ -449,8 +449,8 @@ fixed_int8m1_t leq_i8(fixed_int8m1_t a, fixed_int8m1_t b) {
 // CHECK-NEXT:    [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[CMP:%.*]] = icmp sle <16 x i16> [[A]], [[B]]
 // CHECK-NEXT:    [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i16>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SEXT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SEXT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_int16m1_t leq_i16(fixed_int16m1_t a, fixed_int16m1_t b) {
   return a <= b;
@@ -462,8 +462,8 @@ fixed_int16m1_t leq_i16(fixed_int16m1_t a, fixed_int16m1_t b) {
 // CHECK-NEXT:    [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[CMP:%.*]] = icmp sle <8 x i32> [[A]], [[B]]
 // CHECK-NEXT:    [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i32>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SEXT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SEXT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_int32m1_t leq_i32(fixed_int32m1_t a, fixed_int32m1_t b) {
   return a <= b;
@@ -475,8 +475,8 @@ fixed_int32m1_t leq_i32(fixed_int32m1_t a, fixed_int32m1_t b) {
 // CHECK-NEXT:    [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[CMP:%.*]] = icmp sle <4 x i64> [[A]], [[B]]
 // CHECK-NEXT:    [[SEXT:%.*]] = sext <4 x i1> [[CMP]] to <4 x i64>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SEXT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SEXT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_int64m1_t leq_i64(fixed_int64m1_t a, fixed_int64m1_t b) {
   return a <= b;
@@ -488,8 +488,8 @@ fixed_int64m1_t leq_i64(fixed_int64m1_t a, fixed_int64m1_t b) {
 // CHECK-NEXT:    [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[CMP:%.*]] = icmp ule <32 x i8> [[A]], [[B]]
 // CHECK-NEXT:    [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i8>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SEXT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SEXT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_int8m1_t leq_u8(fixed_uint8m1_t a, fixed_uint8m1_t b) {
   return a <= b;
@@ -501,8 +501,8 @@ fixed_int8m1_t leq_u8(fixed_uint8m1_t a, fixed_uint8m1_t b) {
 // CHECK-NEXT:    [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[CMP:%.*]] = icmp ule <16 x i16> [[A]], [[B]]
 // CHECK-NEXT:    [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i16>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SEXT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SEXT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_int16m1_t leq_u16(fixed_uint16m1_t a, fixed_uint16m1_t b) {
   return a <= b;
@@ -514,8 +514,8 @@ fixed_int16m1_t leq_u16(fixed_uint16m1_t a, fixed_uint16m1_t b) {
 // CHECK-NEXT:    [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[CMP:%.*]] = icmp ule <8 x i32> [[A]], [[B]]
 // CHECK-NEXT:    [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i32>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SEXT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SEXT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_int32m1_t leq_u32(fixed_uint32m1_t a, fixed_uint32m1_t b) {
   return a <= b;
@@ -527,8 +527,8 @@ fixed_int32m1_t leq_u32(fixed_uint32m1_t a, fixed_uint32m1_t b) {
 // CHECK-NEXT:    [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[CMP:%.*]] = icmp ule <4 x i64> [[A]], [[B]]
 // CHECK-NEXT:    [[SEXT:%.*]] = sext <4 x i1> [[CMP]] to <4 x i64>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SEXT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SEXT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_int64m1_t leq_u64(fixed_uint64m1_t a, fixed_uint64m1_t b) {
   return a <= b;
@@ -540,8 +540,8 @@ fixed_int64m1_t leq_u64(fixed_uint64m1_t a, fixed_uint64m1_t b) {
 // CHECK-NEXT:    [[B:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv2f32(<vscale x 2 x float> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[CMP:%.*]] = fcmp ole <8 x float> [[A]], [[B]]
 // CHECK-NEXT:    [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i32>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SEXT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SEXT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_int32m1_t leq_f32(fixed_float32m1_t a, fixed_float32m1_t b) {
   return a <= b;
@@ -553,8 +553,8 @@ fixed_int32m1_t leq_f32(fixed_float32m1_t a, fixed_float32m1_t b) {
 // CHECK-NEXT:    [[B:%.*]] = call <4 x double> @llvm.vector.extract.v4f64.nxv1f64(<vscale x 1 x double> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[CMP:%.*]] = fcmp ole <4 x double> [[A]], [[B]]
 // CHECK-NEXT:    [[SEXT:%.*]] = sext <4 x i1> [[CMP]] to <4 x i64>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SEXT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SEXT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_int64m1_t leq_f64(fixed_float64m1_t a, fixed_float64m1_t b) {
   return a <= b;
@@ -568,8 +568,8 @@ fixed_int64m1_t leq_f64(fixed_float64m1_t a, fixed_float64m1_t b) {
 // CHECK-NEXT:    [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[CMP:%.*]] = icmp sgt <32 x i8> [[A]], [[B]]
 // CHECK-NEXT:    [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i8>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SEXT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SEXT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_int8m1_t gt_i8(fixed_int8m1_t a, fixed_int8m1_t b) {
   return a > b;
@@ -581,8 +581,8 @@ fixed_int8m1_t gt_i8(fixed_int8m1_t a, fixed_int8m1_t b) {
 // CHECK-NEXT:    [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[CMP:%.*]] = icmp sgt <16 x i16> [[A]], [[B]]
 // CHECK-NEXT:    [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i16>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SEXT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SEXT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_int16m1_t gt_i16(fixed_int16m1_t a, fixed_int16m1_t b) {
   return a > b;
@@ -594,8 +594,8 @@ fixed_int16m1_t gt_i16(fixed_int16m1_t a, fixed_int16m1_t b) {
 // CHECK-NEXT:    [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[CMP:%.*]] = icmp sgt <8 x i32> [[A]], [[B]]
 // CHECK-NEXT:    [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i32>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SEXT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SEXT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_int32m1_t gt_i32(fixed_int32m1_t a, fixed_int32m1_t b) {
   return a > b;
@@ -607,8 +607,8 @@ fixed_int32m1_t gt_i32(fixed_int32m1_t a, fixed_int32m1_t b) {
 // CHECK-NEXT:    [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[CMP:%.*]] = icmp sgt <4 x i64> [[A]], [[B]]
 // CHECK-NEXT:    [[SEXT:%.*]] = sext <4 x i1> [[CMP]] to <4 x i64>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SEXT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SEXT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_int64m1_t gt_i64(fixed_int64m1_t a, fixed_int64m1_t b) {
   return a > b;
@@ -620,8 +620,8 @@ fixed_int64m1_t gt_i64(fixed_int64m1_t a, fixed_int64m1_t b) {
 // CHECK-NEXT:    [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[CMP:%.*]] = icmp ugt <32 x i8> [[A]], [[B]]
 // CHECK-NEXT:    [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i8>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SEXT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SEXT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_int8m1_t gt_u8(fixed_uint8m1_t a, fixed_uint8m1_t b) {
   return a > b;
@@ -633,8 +633,8 @@ fixed_int8m1_t gt_u8(fixed_uint8m1_t a, fixed_uint8m1_t b) {
 // CHECK-NEXT:    [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[CMP:%.*]] = icmp ugt <16 x i16> [[A]], [[B]]
 // CHECK-NEXT:    [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i16>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SEXT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SEXT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_int16m1_t gt_u16(fixed_uint16m1_t a, fixed_uint16m1_t b) {
   return a > b;
@@ -646,8 +646,8 @@ fixed_int16m1_t gt_u16(fixed_uint16m1_t a, fixed_uint16m1_t b) {
 // CHECK-NEXT:    [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[CMP:%.*]] = icmp ugt <8 x i32> [[A]], [[B]]
 // CHECK-NEXT:    [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i32>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SEXT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SEXT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_int32m1_t gt_u32(fixed_uint32m1_t a, fixed_uint32m1_t b) {
   return a > b;
@@ -659,8 +659,8 @@ fixed_int32m1_t gt_u32(fixed_uint32m1_t a, fixed_uint32m1_t b) {
 // CHECK-NEXT:    [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[CMP:%.*]] = icmp ugt <4 x i64> [[A]], [[B]]
 // CHECK-NEXT:    [[SEXT:%.*]] = sext <4 x i1> [[CMP]] to <4 x i64>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SEXT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SEXT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_int64m1_t gt_u64(fixed_uint64m1_t a, fixed_uint64m1_t b) {
   return a > b;
@@ -672,8 +672,8 @@ fixed_int64m1_t gt_u64(fixed_uint64m1_t a, fixed_uint64m1_t b) {
 // CHECK-NEXT:    [[B:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv2f32(<vscale x 2 x float> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[CMP:%.*]] = fcmp ogt <8 x float> [[A]], [[B]]
 // CHECK-NEXT:    [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i32>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SEXT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SEXT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_int32m1_t gt_f32(fixed_float32m1_t a, fixed_float32m1_t b) {
   return a > b;
@@ -685,8 +685,8 @@ fixed_int32m1_t gt_f32(fixed_float32m1_t a, fixed_float32m1_t b) {
 // CHECK-NEXT:    [[B:%.*]] = call <4 x double> @llvm.vector.extract.v4f64.nxv1f64(<vscale x 1 x double> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[CMP:%.*]] = fcmp ogt <4 x double> [[A]], [[B]]
 // CHECK-NEXT:    [[SEXT:%.*]] = sext <4 x i1> [[CMP]] to <4 x i64>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SEXT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SEXT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_int64m1_t gt_f64(fixed_float64m1_t a, fixed_float64m1_t b) {
   return a > b;
@@ -700,8 +700,8 @@ fixed_int64m1_t gt_f64(fixed_float64m1_t a, fixed_float64m1_t b) {
 // CHECK-NEXT:    [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[CMP:%.*]] = icmp sge <32 x i8> [[A]], [[B]]
 // CHECK-NEXT:    [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i8>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SEXT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SEXT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_int8m1_t geq_i8(fixed_int8m1_t a, fixed_int8m1_t b) {
   return a >= b;
@@ -713,8 +713,8 @@ fixed_int8m1_t geq_i8(fixed_int8m1_t a, fixed_int8m1_t b) {
 // CHECK-NEXT:    [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[CMP:%.*]] = icmp sge <16 x i16> [[A]], [[B]]
 // CHECK-NEXT:    [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i16>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SEXT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SEXT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_int16m1_t geq_i16(fixed_int16m1_t a, fixed_int16m1_t b) {
   return a >= b;
@@ -726,8 +726,8 @@ fixed_int16m1_t geq_i16(fixed_int16m1_t a, fixed_int16m1_t b) {
 // CHECK-NEXT:    [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[CMP:%.*]] = icmp sge <8 x i32> [[A]], [[B]]
 // CHECK-NEXT:    [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i32>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SEXT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SEXT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_int32m1_t geq_i32(fixed_int32m1_t a, fixed_int32m1_t b) {
   return a >= b;
@@ -739,8 +739,8 @@ fixed_int32m1_t geq_i32(fixed_int32m1_t a, fixed_int32m1_t b) {
 // CHECK-NEXT:    [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[CMP:%.*]] = icmp sge <4 x i64> [[A]], [[B]]
 // CHECK-NEXT:    [[SEXT:%.*]] = sext <4 x i1> [[CMP]] to <4 x i64>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SEXT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SEXT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_int64m1_t geq_i64(fixed_int64m1_t a, fixed_int64m1_t b) {
   return a >= b;
@@ -752,8 +752,8 @@ fixed_int64m1_t geq_i64(fixed_int64m1_t a, fixed_int64m1_t b) {
 // CHECK-NEXT:    [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[CMP:%.*]] = icmp uge <32 x i8> [[A]], [[B]]
 // CHECK-NEXT:    [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i8>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SEXT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SEXT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_int8m1_t geq_u8(fixed_uint8m1_t a, fixed_uint8m1_t b) {
   return a >= b;
@@ -765,8 +765,8 @@ fixed_int8m1_t geq_u8(fixed_uint8m1_t a, fixed_uint8m1_t b) {
 // CHECK-NEXT:    [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[CMP:%.*]] = icmp uge <16 x i16> [[A]], [[B]]
 // CHECK-NEXT:    [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i16>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SEXT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SEXT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_int16m1_t geq_u16(fixed_uint16m1_t a, fixed_uint16m1_t b) {
   return a >= b;
@@ -778,8 +778,8 @@ fixed_int16m1_t geq_u16(fixed_uint16m1_t a, fixed_uint16m1_t b) {
 // CHECK-NEXT:    [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[CMP:%.*]] = icmp uge <8 x i32> [[A]], [[B]]
 // CHECK-NEXT:    [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i32>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SEXT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SEXT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_int32m1_t geq_u32(fixed_uint32m1_t a, fixed_uint32m1_t b) {
   return a >= b;
@@ -791,8 +791,8 @@ fixed_int32m1_t geq_u32(fixed_uint32m1_t a, fixed_uint32m1_t b) {
 // CHECK-NEXT:    [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[CMP:%.*]] = icmp uge <4 x i64> [[A]], [[B]]
 // CHECK-NEXT:    [[SEXT:%.*]] = sext <4 x i1> [[CMP]] to <4 x i64>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SEXT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SEXT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_int64m1_t geq_u64(fixed_uint64m1_t a, fixed_uint64m1_t b) {
   return a >= b;
@@ -804,8 +804,8 @@ fixed_int64m1_t geq_u64(fixed_uint64m1_t a, fixed_uint64m1_t b) {
 // CHECK-NEXT:    [[B:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv2f32(<vscale x 2 x float> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[CMP:%.*]] = fcmp oge <8 x float> [[A]], [[B]]
 // CHECK-NEXT:    [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i32>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SEXT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SEXT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_int32m1_t geq_f32(fixed_float32m1_t a, fixed_float32m1_t b) {
   return a >= b;
@@ -817,8 +817,8 @@ fixed_int32m1_t geq_f32(fixed_float32m1_t a, fixed_float32m1_t b) {
 // CHECK-NEXT:    [[B:%.*]] = call <4 x double> @llvm.vector.extract.v4f64.nxv1f64(<vscale x 1 x double> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[CMP:%.*]] = fcmp oge <4 x double> [[A]], [[B]]
 // CHECK-NEXT:    [[SEXT:%.*]] = sext <4 x i1> [[CMP]] to <4 x i64>
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SEXT]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SEXT]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_int64m1_t geq_f64(fixed_float64m1_t a, fixed_float64m1_t b) {
   return a >= b;

diff  --git a/clang/test/CodeGen/riscv-rvv-vls-shift-ops.c b/clang/test/CodeGen/riscv-rvv-vls-shift-ops.c
index f5ef09d7262b8d..d88a5ba239e821 100644
--- a/clang/test/CodeGen/riscv-rvv-vls-shift-ops.c
+++ b/clang/test/CodeGen/riscv-rvv-vls-shift-ops.c
@@ -37,8 +37,8 @@ typedef vfloat64m1_t fixed_float64m1_t __attribute__((riscv_rvv_vector_bits(__ri
 // CHECK-NEXT:    [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[SHL:%.*]] = shl <32 x i8> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SHL]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SHL]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_int8m1_t lshift_i8(fixed_int8m1_t a, fixed_int8m1_t b) {
   return a << b;
@@ -49,8 +49,8 @@ fixed_int8m1_t lshift_i8(fixed_int8m1_t a, fixed_int8m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[SHR:%.*]] = ashr <32 x i8> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SHR]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SHR]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_int8m1_t rshift_i8(fixed_int8m1_t a, fixed_int8m1_t b) {
   return a >> b;
@@ -61,8 +61,8 @@ fixed_int8m1_t rshift_i8(fixed_int8m1_t a, fixed_int8m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[SHL:%.*]] = shl <32 x i8> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SHL]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SHL]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_uint8m1_t lshift_u8(fixed_uint8m1_t a, fixed_uint8m1_t b) {
   return a << b;
@@ -73,8 +73,8 @@ fixed_uint8m1_t lshift_u8(fixed_uint8m1_t a, fixed_uint8m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[SHR:%.*]] = lshr <32 x i8> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SHR]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SHR]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_uint8m1_t rshift_u8(fixed_uint8m1_t a, fixed_uint8m1_t b) {
   return a >> b;
@@ -85,8 +85,8 @@ fixed_uint8m1_t rshift_u8(fixed_uint8m1_t a, fixed_uint8m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[SHL:%.*]] = shl <16 x i16> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SHL]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SHL]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_int16m1_t lshift_i16(fixed_int16m1_t a, fixed_int16m1_t b) {
   return a << b;
@@ -97,8 +97,8 @@ fixed_int16m1_t lshift_i16(fixed_int16m1_t a, fixed_int16m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[SHR:%.*]] = ashr <16 x i16> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SHR]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SHR]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_int16m1_t rshift_i16(fixed_int16m1_t a, fixed_int16m1_t b) {
   return a >> b;
@@ -109,8 +109,8 @@ fixed_int16m1_t rshift_i16(fixed_int16m1_t a, fixed_int16m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[SHL:%.*]] = shl <16 x i16> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SHL]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SHL]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_uint16m1_t lshift_u16(fixed_uint16m1_t a, fixed_uint16m1_t b) {
   return a << b;
@@ -121,8 +121,8 @@ fixed_uint16m1_t lshift_u16(fixed_uint16m1_t a, fixed_uint16m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[SHR:%.*]] = lshr <16 x i16> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SHR]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SHR]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_uint16m1_t rshift_u16(fixed_uint16m1_t a, fixed_uint16m1_t b) {
   return a >> b;
@@ -133,8 +133,8 @@ fixed_uint16m1_t rshift_u16(fixed_uint16m1_t a, fixed_uint16m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[SHL:%.*]] = shl <8 x i32> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SHL]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SHL]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_int32m1_t lshift_i32(fixed_int32m1_t a, fixed_int32m1_t b) {
   return a << b;
@@ -145,8 +145,8 @@ fixed_int32m1_t lshift_i32(fixed_int32m1_t a, fixed_int32m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[SHR:%.*]] = ashr <8 x i32> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SHR]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SHR]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_int32m1_t rshift_i32(fixed_int32m1_t a, fixed_int32m1_t b) {
   return a >> b;
@@ -157,8 +157,8 @@ fixed_int32m1_t rshift_i32(fixed_int32m1_t a, fixed_int32m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[SHL:%.*]] = shl <8 x i32> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SHL]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SHL]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_uint32m1_t lshift_u32(fixed_uint32m1_t a, fixed_uint32m1_t b) {
   return a << b;
@@ -169,8 +169,8 @@ fixed_uint32m1_t lshift_u32(fixed_uint32m1_t a, fixed_uint32m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[SHR:%.*]] = lshr <8 x i32> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SHR]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SHR]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_uint32m1_t rshift_u32(fixed_uint32m1_t a, fixed_uint32m1_t b) {
   return a >> b;
@@ -181,8 +181,8 @@ fixed_uint32m1_t rshift_u32(fixed_uint32m1_t a, fixed_uint32m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[SHL:%.*]] = shl <4 x i64> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SHL]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SHL]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_int64m1_t lshift_i64(fixed_int64m1_t a, fixed_int64m1_t b) {
   return a << b;
@@ -193,8 +193,8 @@ fixed_int64m1_t lshift_i64(fixed_int64m1_t a, fixed_int64m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[SHR:%.*]] = ashr <4 x i64> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SHR]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SHR]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_int64m1_t rshift_i64(fixed_int64m1_t a, fixed_int64m1_t b) {
   return a >> b;
@@ -205,8 +205,8 @@ fixed_int64m1_t rshift_i64(fixed_int64m1_t a, fixed_int64m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[SHL:%.*]] = shl <4 x i64> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SHL]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SHL]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_uint64m1_t lshift_u64(fixed_uint64m1_t a, fixed_uint64m1_t b) {
   return a << b;
@@ -217,8 +217,8 @@ fixed_uint64m1_t lshift_u64(fixed_uint64m1_t a, fixed_uint64m1_t b) {
 // CHECK-NEXT:    [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
 // CHECK-NEXT:    [[SHR:%.*]] = lshr <4 x i64> [[A]], [[B]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SHR]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SHR]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_uint64m1_t rshift_u64(fixed_uint64m1_t a, fixed_uint64m1_t b) {
   return a >> b;
@@ -232,8 +232,8 @@ fixed_uint64m1_t rshift_u64(fixed_uint64m1_t a, fixed_uint64m1_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i32> [[SPLAT_SPLATINSERT]], <32 x i32> poison, <32 x i32> zeroinitializer
 // CHECK-NEXT:    [[SH_PROM:%.*]] = trunc <32 x i32> [[SPLAT_SPLAT]] to <32 x i8>
 // CHECK-NEXT:    [[SHL:%.*]] = shl <32 x i8> [[A]], [[SH_PROM]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SHL]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SHL]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_int8m1_t lshift_i8_rsplat(fixed_int8m1_t a, int8_t b) {
   return a << b;
@@ -245,8 +245,8 @@ fixed_int8m1_t lshift_i8_rsplat(fixed_int8m1_t a, int8_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i8> poison, i8 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i8> [[SPLAT_SPLATINSERT]], <32 x i8> poison, <32 x i32> zeroinitializer
 // CHECK-NEXT:    [[SHL:%.*]] = shl <32 x i8> [[SPLAT_SPLAT]], [[A]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SHL]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SHL]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_int8m1_t lshift_i8_lsplat(fixed_int8m1_t a, int8_t b) {
   return b << a;
@@ -260,8 +260,8 @@ fixed_int8m1_t lshift_i8_lsplat(fixed_int8m1_t a, int8_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i32> [[SPLAT_SPLATINSERT]], <32 x i32> poison, <32 x i32> zeroinitializer
 // CHECK-NEXT:    [[SH_PROM:%.*]] = trunc <32 x i32> [[SPLAT_SPLAT]] to <32 x i8>
 // CHECK-NEXT:    [[SHR:%.*]] = ashr <32 x i8> [[A]], [[SH_PROM]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SHR]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SHR]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_int8m1_t rshift_i8_rsplat(fixed_int8m1_t a, int8_t b) {
   return a >> b;
@@ -273,8 +273,8 @@ fixed_int8m1_t rshift_i8_rsplat(fixed_int8m1_t a, int8_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i8> poison, i8 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i8> [[SPLAT_SPLATINSERT]], <32 x i8> poison, <32 x i32> zeroinitializer
 // CHECK-NEXT:    [[SHR:%.*]] = ashr <32 x i8> [[SPLAT_SPLAT]], [[A]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SHR]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SHR]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_int8m1_t rshift_i8_lsplat(fixed_int8m1_t a, int8_t b) {
   return b >> a;
@@ -288,8 +288,8 @@ fixed_int8m1_t rshift_i8_lsplat(fixed_int8m1_t a, int8_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i32> [[SPLAT_SPLATINSERT]], <32 x i32> poison, <32 x i32> zeroinitializer
 // CHECK-NEXT:    [[SH_PROM:%.*]] = trunc <32 x i32> [[SPLAT_SPLAT]] to <32 x i8>
 // CHECK-NEXT:    [[SHL:%.*]] = shl <32 x i8> [[A]], [[SH_PROM]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SHL]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SHL]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_uint8m1_t lshift_u8_rsplat(fixed_uint8m1_t a, uint8_t b) {
   return a << b;
@@ -301,8 +301,8 @@ fixed_uint8m1_t lshift_u8_rsplat(fixed_uint8m1_t a, uint8_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i8> poison, i8 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i8> [[SPLAT_SPLATINSERT]], <32 x i8> poison, <32 x i32> zeroinitializer
 // CHECK-NEXT:    [[SHL:%.*]] = shl <32 x i8> [[SPLAT_SPLAT]], [[A]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SHL]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SHL]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_uint8m1_t lshift_u8_lsplat(fixed_uint8m1_t a, uint8_t b) {
   return b << a;
@@ -316,8 +316,8 @@ fixed_uint8m1_t lshift_u8_lsplat(fixed_uint8m1_t a, uint8_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i32> [[SPLAT_SPLATINSERT]], <32 x i32> poison, <32 x i32> zeroinitializer
 // CHECK-NEXT:    [[SH_PROM:%.*]] = trunc <32 x i32> [[SPLAT_SPLAT]] to <32 x i8>
 // CHECK-NEXT:    [[SHR:%.*]] = lshr <32 x i8> [[A]], [[SH_PROM]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SHR]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SHR]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_uint8m1_t rshift_u8_rsplat(fixed_uint8m1_t a, uint8_t b) {
   return a >> b;
@@ -329,8 +329,8 @@ fixed_uint8m1_t rshift_u8_rsplat(fixed_uint8m1_t a, uint8_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i8> poison, i8 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i8> [[SPLAT_SPLATINSERT]], <32 x i8> poison, <32 x i32> zeroinitializer
 // CHECK-NEXT:    [[SHR:%.*]] = lshr <32 x i8> [[SPLAT_SPLAT]], [[A]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SHR]], i64 0)
-// CHECK-NEXT:    ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SHR]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
 //
 fixed_uint8m1_t rshift_u8_lsplat(fixed_uint8m1_t a, uint8_t b) {
   return b >> a;
@@ -344,8 +344,8 @@ fixed_uint8m1_t rshift_u8_lsplat(fixed_uint8m1_t a, uint8_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer
 // CHECK-NEXT:    [[SH_PROM:%.*]] = trunc <16 x i32> [[SPLAT_SPLAT]] to <16 x i16>
 // CHECK-NEXT:    [[SHL:%.*]] = shl <16 x i16> [[A]], [[SH_PROM]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SHL]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SHL]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_int16m1_t lshift_i16_rsplat(fixed_int16m1_t a, int16_t b) {
   return a << b;
@@ -357,8 +357,8 @@ fixed_int16m1_t lshift_i16_rsplat(fixed_int16m1_t a, int16_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i16> poison, i16 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i16> [[SPLAT_SPLATINSERT]], <16 x i16> poison, <16 x i32> zeroinitializer
 // CHECK-NEXT:    [[SHL:%.*]] = shl <16 x i16> [[SPLAT_SPLAT]], [[A]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SHL]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SHL]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_int16m1_t lshift_i16_lsplat(fixed_int16m1_t a, int16_t b) {
   return b << a;
@@ -372,8 +372,8 @@ fixed_int16m1_t lshift_i16_lsplat(fixed_int16m1_t a, int16_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer
 // CHECK-NEXT:    [[SH_PROM:%.*]] = trunc <16 x i32> [[SPLAT_SPLAT]] to <16 x i16>
 // CHECK-NEXT:    [[SHR:%.*]] = ashr <16 x i16> [[A]], [[SH_PROM]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SHR]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SHR]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_int16m1_t rshift_i16_rsplat(fixed_int16m1_t a, int16_t b) {
   return a >> b;
@@ -385,8 +385,8 @@ fixed_int16m1_t rshift_i16_rsplat(fixed_int16m1_t a, int16_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i16> poison, i16 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i16> [[SPLAT_SPLATINSERT]], <16 x i16> poison, <16 x i32> zeroinitializer
 // CHECK-NEXT:    [[SHR:%.*]] = ashr <16 x i16> [[SPLAT_SPLAT]], [[A]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SHR]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SHR]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_int16m1_t rshift_i16_lsplat(fixed_int16m1_t a, int16_t b) {
   return b >> a;
@@ -400,8 +400,8 @@ fixed_int16m1_t rshift_i16_lsplat(fixed_int16m1_t a, int16_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer
 // CHECK-NEXT:    [[SH_PROM:%.*]] = trunc <16 x i32> [[SPLAT_SPLAT]] to <16 x i16>
 // CHECK-NEXT:    [[SHL:%.*]] = shl <16 x i16> [[A]], [[SH_PROM]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SHL]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SHL]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_uint16m1_t lshift_u16_rsplat(fixed_uint16m1_t a, uint16_t b) {
   return a << b;
@@ -413,8 +413,8 @@ fixed_uint16m1_t lshift_u16_rsplat(fixed_uint16m1_t a, uint16_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i16> poison, i16 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i16> [[SPLAT_SPLATINSERT]], <16 x i16> poison, <16 x i32> zeroinitializer
 // CHECK-NEXT:    [[SHL:%.*]] = shl <16 x i16> [[SPLAT_SPLAT]], [[A]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SHL]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SHL]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_uint16m1_t lshift_u16_lsplat(fixed_uint16m1_t a, uint16_t b) {
   return b << a;
@@ -428,8 +428,8 @@ fixed_uint16m1_t lshift_u16_lsplat(fixed_uint16m1_t a, uint16_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer
 // CHECK-NEXT:    [[SH_PROM:%.*]] = trunc <16 x i32> [[SPLAT_SPLAT]] to <16 x i16>
 // CHECK-NEXT:    [[SHR:%.*]] = lshr <16 x i16> [[A]], [[SH_PROM]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SHR]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SHR]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_uint16m1_t rshift_u16_rsplat(fixed_uint16m1_t a, uint16_t b) {
   return a >> b;
@@ -441,8 +441,8 @@ fixed_uint16m1_t rshift_u16_rsplat(fixed_uint16m1_t a, uint16_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i16> poison, i16 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i16> [[SPLAT_SPLATINSERT]], <16 x i16> poison, <16 x i32> zeroinitializer
 // CHECK-NEXT:    [[SHR:%.*]] = lshr <16 x i16> [[SPLAT_SPLAT]], [[A]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SHR]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SHR]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
 //
 fixed_uint16m1_t rshift_u16_lsplat(fixed_uint16m1_t a, uint16_t b) {
   return b >> a;
@@ -454,8 +454,8 @@ fixed_uint16m1_t rshift_u16_lsplat(fixed_uint16m1_t a, uint16_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i32> poison, i32 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT]], <8 x i32> poison, <8 x i32> zeroinitializer
 // CHECK-NEXT:    [[SHL:%.*]] = shl <8 x i32> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SHL]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SHL]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_int32m1_t lshift_i32_rsplat(fixed_int32m1_t a, int32_t b) {
   return a << b;
@@ -467,8 +467,8 @@ fixed_int32m1_t lshift_i32_rsplat(fixed_int32m1_t a, int32_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i32> poison, i32 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT]], <8 x i32> poison, <8 x i32> zeroinitializer
 // CHECK-NEXT:    [[SHL:%.*]] = shl <8 x i32> [[SPLAT_SPLAT]], [[A]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SHL]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SHL]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_int32m1_t lshift_i32_lsplat(fixed_int32m1_t a, int32_t b) {
   return b << a;
@@ -480,8 +480,8 @@ fixed_int32m1_t lshift_i32_lsplat(fixed_int32m1_t a, int32_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i32> poison, i32 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT]], <8 x i32> poison, <8 x i32> zeroinitializer
 // CHECK-NEXT:    [[SHR:%.*]] = ashr <8 x i32> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SHR]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SHR]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_int32m1_t rshift_i32_rsplat(fixed_int32m1_t a, int32_t b) {
   return a >> b;
@@ -493,8 +493,8 @@ fixed_int32m1_t rshift_i32_rsplat(fixed_int32m1_t a, int32_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i32> poison, i32 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT]], <8 x i32> poison, <8 x i32> zeroinitializer
 // CHECK-NEXT:    [[SHR:%.*]] = ashr <8 x i32> [[SPLAT_SPLAT]], [[A]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SHR]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SHR]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_int32m1_t rshift_i32_lsplat(fixed_int32m1_t a, int32_t b) {
   return b >> a;
@@ -506,8 +506,8 @@ fixed_int32m1_t rshift_i32_lsplat(fixed_int32m1_t a, int32_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i32> poison, i32 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT]], <8 x i32> poison, <8 x i32> zeroinitializer
 // CHECK-NEXT:    [[SHL:%.*]] = shl <8 x i32> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SHL]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SHL]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_uint32m1_t lshift_u32_rsplat(fixed_uint32m1_t a, uint32_t b) {
   return a << b;
@@ -519,8 +519,8 @@ fixed_uint32m1_t lshift_u32_rsplat(fixed_uint32m1_t a, uint32_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i32> poison, i32 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT]], <8 x i32> poison, <8 x i32> zeroinitializer
 // CHECK-NEXT:    [[SHL:%.*]] = shl <8 x i32> [[SPLAT_SPLAT]], [[A]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SHL]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SHL]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_uint32m1_t lshift_u32_lsplat(fixed_uint32m1_t a, uint32_t b) {
   return b << a;
@@ -532,8 +532,8 @@ fixed_uint32m1_t lshift_u32_lsplat(fixed_uint32m1_t a, uint32_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i32> poison, i32 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT]], <8 x i32> poison, <8 x i32> zeroinitializer
 // CHECK-NEXT:    [[SHR:%.*]] = lshr <8 x i32> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SHR]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SHR]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_uint32m1_t rshift_u32_rsplat(fixed_uint32m1_t a, uint32_t b) {
   return a >> b;
@@ -545,8 +545,8 @@ fixed_uint32m1_t rshift_u32_rsplat(fixed_uint32m1_t a, uint32_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i32> poison, i32 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT]], <8 x i32> poison, <8 x i32> zeroinitializer
 // CHECK-NEXT:    [[SHR:%.*]] = lshr <8 x i32> [[SPLAT_SPLAT]], [[A]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SHR]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SHR]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_uint32m1_t rshift_u32_lsplat(fixed_uint32m1_t a, uint32_t b) {
   return b >> a;
@@ -558,8 +558,8 @@ fixed_uint32m1_t rshift_u32_lsplat(fixed_uint32m1_t a, uint32_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <4 x i64> [[SPLAT_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
 // CHECK-NEXT:    [[SHL:%.*]] = shl <4 x i64> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SHL]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SHL]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_int64m1_t lshift_i64_rsplat(fixed_int64m1_t a, int64_t b) {
   return a << b;
@@ -571,8 +571,8 @@ fixed_int64m1_t lshift_i64_rsplat(fixed_int64m1_t a, int64_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <4 x i64> [[SPLAT_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
 // CHECK-NEXT:    [[SHL:%.*]] = shl <4 x i64> [[SPLAT_SPLAT]], [[A]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SHL]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SHL]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_int64m1_t lshift_i64_lsplat(fixed_int64m1_t a, int64_t b) {
   return b << a;
@@ -584,8 +584,8 @@ fixed_int64m1_t lshift_i64_lsplat(fixed_int64m1_t a, int64_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <4 x i64> [[SPLAT_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
 // CHECK-NEXT:    [[SHR:%.*]] = ashr <4 x i64> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SHR]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SHR]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_int64m1_t rshift_i64_rsplat(fixed_int64m1_t a, int64_t b) {
   return a >> b;
@@ -597,8 +597,8 @@ fixed_int64m1_t rshift_i64_rsplat(fixed_int64m1_t a, int64_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <4 x i64> [[SPLAT_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
 // CHECK-NEXT:    [[SHR:%.*]] = ashr <4 x i64> [[SPLAT_SPLAT]], [[A]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SHR]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SHR]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_int64m1_t rshift_i64_lsplat(fixed_int64m1_t a, int64_t b) {
   return b >> a;
@@ -610,8 +610,8 @@ fixed_int64m1_t rshift_i64_lsplat(fixed_int64m1_t a, int64_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <4 x i64> [[SPLAT_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
 // CHECK-NEXT:    [[SHL:%.*]] = shl <4 x i64> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SHL]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SHL]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_uint64m1_t lshift_u64_rsplat(fixed_uint64m1_t a, uint64_t b) {
   return a << b;
@@ -623,8 +623,8 @@ fixed_uint64m1_t lshift_u64_rsplat(fixed_uint64m1_t a, uint64_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <4 x i64> [[SPLAT_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
 // CHECK-NEXT:    [[SHL:%.*]] = shl <4 x i64> [[SPLAT_SPLAT]], [[A]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SHL]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SHL]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_uint64m1_t lshift_u64_lsplat(fixed_uint64m1_t a, uint64_t b) {
   return b << a;
@@ -636,8 +636,8 @@ fixed_uint64m1_t lshift_u64_lsplat(fixed_uint64m1_t a, uint64_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <4 x i64> [[SPLAT_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
 // CHECK-NEXT:    [[SHR:%.*]] = lshr <4 x i64> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SHR]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SHR]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_uint64m1_t rshift_u64_rsplat(fixed_uint64m1_t a, uint64_t b) {
   return a >> b;
@@ -649,8 +649,8 @@ fixed_uint64m1_t rshift_u64_rsplat(fixed_uint64m1_t a, uint64_t b) {
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[B:%.*]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <4 x i64> [[SPLAT_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
 // CHECK-NEXT:    [[SHR:%.*]] = lshr <4 x i64> [[SPLAT_SPLAT]], [[A]]
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SHR]], i64 0)
-// CHECK-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SHR]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 fixed_uint64m1_t rshift_u64_lsplat(fixed_uint64m1_t a, uint64_t b) {
   return b >> a;
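
For reference, the renamed FileCheck variables above only track Clang's new IR value names; below is a minimal sketch of the kind of C source these CHECK lines are generated from. The typedef and the 256-bit __riscv_v_fixed_vlen value are illustrative assumptions (chosen to match the <4 x i64> fixed width seen in the checks), not an excerpt of the test files' RUN configuration.

    #include <riscv_vector.h>

    // Fixed-length view of an m1 vector; assumes the translation unit is
    // compiled so that __riscv_v_fixed_vlen expands to 256.
    typedef vuint64m1_t fixed_uint64m1_t
        __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen)));

    // Clang lowers this to an llvm.vector.extract of each operand, a
    // <4 x i64> lshr, and an llvm.vector.insert; the insert's result is the
    // value the updated CHECK lines now capture as [[CAST_SCALABLE]].
    fixed_uint64m1_t rshift_u64(fixed_uint64m1_t a, fixed_uint64m1_t b) {
      return a >> b;
    }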

More information about the cfe-commits mailing list