[llvm] 9fd7523 - [LLVM] Port SVE tests in llvm/test/CodeGen/AArch64 to use splat()

Paul Walker via llvm-commits llvm-commits at lists.llvm.org
Thu Oct 3 07:36:39 PDT 2024


Author: Paul Walker
Date: 2024-10-03T14:35:32Z
New Revision: 9fd75233ca757cf172d10703ac82fc162ef8ec0e

URL: https://github.com/llvm/llvm-project/commit/9fd75233ca757cf172d10703ac82fc162ef8ec0e
DIFF: https://github.com/llvm/llvm-project/commit/9fd75233ca757cf172d10703ac82fc162ef8ec0e.diff

LOG: [LLVM] Port SVE tests in llvm/test/CodeGen/AArch64 to use splat()

This is preparation work towards making such splats use Constant{Int,FP}
by default for scalable vectors.

Added: 
    

Modified: 
    llvm/test/CodeGen/AArch64/DAGCombine_vscale.ll
    llvm/test/CodeGen/AArch64/llvm-ir-to-intrinsic.ll
    llvm/test/CodeGen/AArch64/sme-disable-rematerialize-with-streaming-mode-changes.ll
    llvm/test/CodeGen/AArch64/sve-adr.ll
    llvm/test/CodeGen/AArch64/sve-cmp-folds.ll
    llvm/test/CodeGen/AArch64/sve-dead-masked-store.ll
    llvm/test/CodeGen/AArch64/sve-extract-fixed-vector.ll
    llvm/test/CodeGen/AArch64/sve-extract-scalable-vector.ll
    llvm/test/CodeGen/AArch64/sve-fadda-select.ll
    llvm/test/CodeGen/AArch64/sve-fold-loadext-and-splat-vector.ll
    llvm/test/CodeGen/AArch64/sve-fp-immediates-merging.ll
    llvm/test/CodeGen/AArch64/sve-gather-scatter-addr-opts.ll
    llvm/test/CodeGen/AArch64/sve-insert-vector.ll
    llvm/test/CodeGen/AArch64/sve-int-arith-imm.ll
    llvm/test/CodeGen/AArch64/sve-int-imm.ll
    llvm/test/CodeGen/AArch64/sve-int-log-imm.ll
    llvm/test/CodeGen/AArch64/sve-int-mulh-pred.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-fp-arith-imm.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-index.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith-imm-zero.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith-imm.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith-undef.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-int-compares-with-imm.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-logical-undef.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-reinterpret-no-streaming.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-shifts-undef.ll
    llvm/test/CodeGen/AArch64/sve-knownbits.ll
    llvm/test/CodeGen/AArch64/sve-splat-one-and-ptrue.ll
    llvm/test/CodeGen/AArch64/sve-splat-sext.ll
    llvm/test/CodeGen/AArch64/sve-st1-addressing-mode-reg-imm.ll
    llvm/test/CodeGen/AArch64/sve-stepvector.ll
    llvm/test/CodeGen/AArch64/sve-vecreduce-fold.ll
    llvm/test/CodeGen/AArch64/sve-vector-splat.ll
    llvm/test/CodeGen/AArch64/sve2-int-mul.ll
    llvm/test/CodeGen/AArch64/sve2-int-mulh.ll
    llvm/test/CodeGen/AArch64/sve2-intrinsics-int-arith-imm.ll
    llvm/test/CodeGen/AArch64/sve2-sra.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AArch64/DAGCombine_vscale.ll b/llvm/test/CodeGen/AArch64/DAGCombine_vscale.ll
index 71f4da2b465c13..487b726253cc78 100644
--- a/llvm/test/CodeGen/AArch64/DAGCombine_vscale.ll
+++ b/llvm/test/CodeGen/AArch64/DAGCombine_vscale.ll
@@ -10,10 +10,8 @@ define <vscale x 4 x i32> @sext_inreg(<vscale x 4 x i32> %a) {
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    sxth z0.s, p0/m, z0.s
 ; CHECK-NEXT:    ret
-  %in = insertelement <vscale x 4 x i32> undef, i32 16, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %in, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %sext = shl <vscale x 4 x i32> %a, %splat
-  %conv = ashr <vscale x 4 x i32> %sext, %splat
+  %sext = shl <vscale x 4 x i32> %a, splat(i32 16)
+  %conv = ashr <vscale x 4 x i32> %sext, splat(i32 16)
   ret <vscale x 4 x i32> %conv
 }
 
@@ -23,12 +21,8 @@ define <vscale x 4 x i32> @ashr_shl(<vscale x 4 x i32> %a)  {
 ; CHECK-NEXT:    lsl z0.s, z0.s, #8
 ; CHECK-NEXT:    asr z0.s, z0.s, #16
 ; CHECK-NEXT:    ret
-  %in1 = insertelement <vscale x 4 x i32> undef, i32 8, i32 0
-  %splat1 = shufflevector <vscale x 4 x i32> %in1, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %in2 = insertelement <vscale x 4 x i32> undef, i32 16, i32 0
-  %splat2 = shufflevector <vscale x 4 x i32> %in2, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %shl = shl <vscale x 4 x i32> %a, %splat1
-  %r = ashr <vscale x 4 x i32> %shl, %splat2
+  %shl = shl <vscale x 4 x i32> %a, splat(i32 8)
+  %r = ashr <vscale x 4 x i32> %shl, splat(i32 16)
   ret <vscale x 4 x i32> %r
 }
 
@@ -38,12 +32,8 @@ define <vscale x 4 x i32> @ashr_shl_illegal_trunc_vec_ty(<vscale x 4 x i32> %a)
 ; CHECK-NEXT:    lsl z0.s, z0.s, #8
 ; CHECK-NEXT:    asr z0.s, z0.s, #11
 ; CHECK-NEXT:    ret
-  %in1 = insertelement <vscale x 4 x i32> undef, i32 8, i32 0
-  %splat1 = shufflevector <vscale x 4 x i32> %in1, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %in2 = insertelement <vscale x 4 x i32> undef, i32 11, i32 0
-  %splat2 = shufflevector <vscale x 4 x i32> %in2, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %shl = shl <vscale x 4 x i32> %a, %splat1
-  %r = ashr <vscale x 4 x i32> %shl, %splat2
+  %shl = shl <vscale x 4 x i32> %a, splat(i32 8)
+  %r = ashr <vscale x 4 x i32> %shl, splat(i32 11)
   ret <vscale x 4 x i32> %r
 }
 
@@ -55,12 +45,8 @@ define <vscale x 4 x i32> @ashr_add_shl_nxv4i8(<vscale x 4 x i32> %a) {
 ; CHECK-NEXT:    add z0.s, z0.s, z1.s
 ; CHECK-NEXT:    asr z0.s, z0.s, #24
 ; CHECK-NEXT:    ret
-  %in1 = insertelement <vscale x 4 x i32> undef, i32 24, i32 0
-  %splat1 = shufflevector <vscale x 4 x i32> %in1, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %in2 = insertelement <vscale x 4 x i32> undef, i32 16777216, i32 0
-  %splat2 = shufflevector <vscale x 4 x i32> %in2, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %conv = shl <vscale x 4 x i32> %a, %splat1
-  %sext = add <vscale x 4 x i32> %conv, %splat2
-  %conv1 = ashr <vscale x 4 x i32> %sext, %splat1
+  %conv = shl <vscale x 4 x i32> %a, splat(i32 24)
+  %sext = add <vscale x 4 x i32> %conv, splat(i32 16777216)
+  %conv1 = ashr <vscale x 4 x i32> %sext, splat(i32 24)
   ret <vscale x 4 x i32> %conv1
 }

diff --git a/llvm/test/CodeGen/AArch64/llvm-ir-to-intrinsic.ll b/llvm/test/CodeGen/AArch64/llvm-ir-to-intrinsic.ll
index 59a460923e8b71..47fae5a01c931a 100644
--- a/llvm/test/CodeGen/AArch64/llvm-ir-to-intrinsic.ll
+++ b/llvm/test/CodeGen/AArch64/llvm-ir-to-intrinsic.ll
@@ -1165,9 +1165,7 @@ define <vscale x 2 x i64> @fshl_rot_const_i64(<vscale x 2 x i64> %a){
 ; CHECK-NEXT:    lsl z0.d, z0.d, #3
 ; CHECK-NEXT:    orr z0.d, z0.d, z1.d
 ; CHECK-NEXT:    ret
-  %insert = insertelement <vscale x 2 x i64> poison, i64 3, i32 0
-  %shuf = shufflevector <vscale x 2 x i64> %insert, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %fshl = call <vscale x 2 x i64> @llvm.fshl.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %a, <vscale x 2 x i64> %shuf)
+  %fshl = call <vscale x 2 x i64> @llvm.fshl.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %a, <vscale x 2 x i64> splat(i64 3))
   ret <vscale x 2 x i64> %fshl
 }
 

diff --git a/llvm/test/CodeGen/AArch64/sme-disable-rematerialize-with-streaming-mode-changes.ll b/llvm/test/CodeGen/AArch64/sme-disable-rematerialize-with-streaming-mode-changes.ll
index b3aeb1fcc42da1..5b2aeb04b71da3 100644
--- a/llvm/test/CodeGen/AArch64/sme-disable-rematerialize-with-streaming-mode-changes.ll
+++ b/llvm/test/CodeGen/AArch64/sme-disable-rematerialize-with-streaming-mode-changes.ll
@@ -54,9 +54,7 @@ entry:
 for.body:                                         ; preds = %entry, %for.body
   %index.03 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
   call void asm sideeffect "", "~{x19},~{x20},~{x21},~{x22},~{x23},~{x24},~{x25},~{x26},~{x27}"() nounwind
-  %ptrue.ins = insertelement <vscale x 16 x i1> poison, i1 1, i32 0
-  %ptrue = shufflevector <vscale x 16 x i1> %ptrue.ins, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  call void @bar(<vscale x 16 x i1> %ptrue)
+  call void @bar(<vscale x 16 x i1> splat(i1 true))
   %inc = add nuw nsw i32 %index.03, 1
   %exitcond.not = icmp eq i32 %inc, %N
   br i1 %exitcond.not, label %for.cond.cleanup, label %for.body

diff --git a/llvm/test/CodeGen/AArch64/sve-adr.ll b/llvm/test/CodeGen/AArch64/sve-adr.ll
index 8f8477d8f3f151..388c07cf0cb6da 100644
--- a/llvm/test/CodeGen/AArch64/sve-adr.ll
+++ b/llvm/test/CodeGen/AArch64/sve-adr.ll
@@ -14,10 +14,8 @@ define <vscale x 4 x i32>  @adr_32bit_lsl1(<vscale x 4 x i32>  %base, <vscale x
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    adr z0.s, [z0.s, z1.s, lsl #1]
 ; CHECK-NEXT:    ret
-  %splat_insert = insertelement <vscale x 4 x i32> poison, i32 1, i32 0
-  %one = shufflevector <vscale x 4 x i32> %splat_insert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %shiftedOffset = shl <vscale x 4 x i32> %idx, %one
-  %address = add <vscale x 4 x i32> %base, %shiftedOffset
+  %offset = shl <vscale x 4 x i32> %idx, splat (i32 1)
+  %address = add <vscale x 4 x i32> %base, %offset
   ret <vscale x 4 x i32>  %address
 }
 
@@ -26,10 +24,8 @@ define <vscale x 4 x i32>  @adr_32bit_lsl2(<vscale x 4 x i32>  %base, <vscale x
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    adr z0.s, [z0.s, z1.s, lsl #2]
 ; CHECK-NEXT:    ret
-  %splat_insert = insertelement <vscale x 4 x i32> poison, i32 2, i32 0
-  %two = shufflevector <vscale x 4 x i32> %splat_insert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %shiftedOffset = shl <vscale x 4 x i32> %idx, %two
-  %address = add <vscale x 4 x i32> %base, %shiftedOffset
+  %offset = shl <vscale x 4 x i32> %idx, splat (i32 2)
+  %address = add <vscale x 4 x i32> %base, %offset
   ret <vscale x 4 x i32>  %address
 }
 
@@ -38,10 +34,8 @@ define <vscale x 4 x i32>  @adr_32bit_lsl3(<vscale x 4 x i32>  %base, <vscale x
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    adr z0.s, [z0.s, z1.s, lsl #3]
 ; CHECK-NEXT:    ret
-  %splat_insert = insertelement <vscale x 4 x i32> poison, i32 3, i32 0
-  %three = shufflevector <vscale x 4 x i32> %splat_insert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %shiftedOffset = shl <vscale x 4 x i32> %idx, %three
-  %address = add <vscale x 4 x i32> %base, %shiftedOffset
+  %offset = shl <vscale x 4 x i32> %idx, splat (i32 3)
+  %address = add <vscale x 4 x i32> %base, %offset
   ret <vscale x 4 x i32>  %address
 }
 

diff --git a/llvm/test/CodeGen/AArch64/sve-cmp-folds.ll b/llvm/test/CodeGen/AArch64/sve-cmp-folds.ll
index 1b78c253c58dd9..981cc88298a3ed 100644
--- a/llvm/test/CodeGen/AArch64/sve-cmp-folds.ll
+++ b/llvm/test/CodeGen/AArch64/sve-cmp-folds.ll
@@ -8,9 +8,7 @@ define <vscale x 8 x i1> @not_icmp_sle_nxv8i16(<vscale x 8 x i16> %a, <vscale x
 ; CHECK-NEXT:    cmpgt p0.h, p0/z, z0.h, z1.h
 ; CHECK-NEXT:    ret
   %icmp = icmp sle <vscale x 8 x i16> %a, %b
-  %tmp = insertelement <vscale x 8 x i1> undef, i1 true, i32 0
-  %ones = shufflevector <vscale x 8 x i1> %tmp, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
-  %not = xor <vscale x 8 x i1> %ones, %icmp
+  %not = xor <vscale x 8 x i1> splat(i1 true), %icmp
   ret <vscale x 8 x i1> %not
 }
 
@@ -21,9 +19,7 @@ define <vscale x 4 x i1> @not_icmp_sgt_nxv4i32(<vscale x 4 x i32> %a, <vscale x
 ; CHECK-NEXT:    cmpge p0.s, p0/z, z1.s, z0.s
 ; CHECK-NEXT:    ret
   %icmp = icmp sgt <vscale x 4 x i32> %a, %b
-  %tmp = insertelement <vscale x 4 x i1> undef, i1 true, i32 0
-  %ones = shufflevector <vscale x 4 x i1> %tmp, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
-  %not = xor <vscale x 4 x i1> %icmp, %ones
+  %not = xor <vscale x 4 x i1> %icmp, splat(i1 true)
   ret <vscale x 4 x i1> %not
 }
 
@@ -34,9 +30,7 @@ define <vscale x 2 x i1> @not_fcmp_une_nxv2f64(<vscale x 2 x double> %a, <vscale
 ; CHECK-NEXT:    fcmeq p0.d, p0/z, z0.d, z1.d
 ; CHECK-NEXT:    ret
   %icmp = fcmp une <vscale x 2 x double> %a, %b
-  %tmp = insertelement <vscale x 2 x i1> undef, i1 true, i32 0
-  %ones = shufflevector <vscale x 2 x i1> %tmp, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
-  %not = xor <vscale x 2 x i1> %icmp, %ones
+  %not = xor <vscale x 2 x i1> %icmp, splat(i1 true)
   ret <vscale x 2 x i1> %not
 }
 
@@ -47,9 +41,7 @@ define <vscale x 4 x i1> @not_fcmp_uge_nxv4f32(<vscale x 4 x float> %a, <vscale
 ; CHECK-NEXT:    fcmgt p0.s, p0/z, z1.s, z0.s
 ; CHECK-NEXT:    ret
   %icmp = fcmp uge <vscale x 4 x float> %a, %b
-  %tmp = insertelement <vscale x 4 x i1> undef, i1 true, i32 0
-  %ones = shufflevector <vscale x 4 x i1> %tmp, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
-  %not = xor <vscale x 4 x i1> %icmp, %ones
+  %not = xor <vscale x 4 x i1> %icmp, splat(i1 true)
   ret <vscale x 4 x i1> %not
 }
 

diff --git a/llvm/test/CodeGen/AArch64/sve-dead-masked-store.ll b/llvm/test/CodeGen/AArch64/sve-dead-masked-store.ll
index c5db3dfdf5e549..2ff32bde25d610 100644
--- a/llvm/test/CodeGen/AArch64/sve-dead-masked-store.ll
+++ b/llvm/test/CodeGen/AArch64/sve-dead-masked-store.ll
@@ -17,10 +17,8 @@ define void @dead_masked_store_alltrue_same(<vscale x 4 x i32> %val, ptr %a, <vs
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x0]
 ; CHECK-NEXT:    ret
-  %alltrue.ins = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %alltrue = shufflevector  <vscale x 4 x i1> %alltrue.ins,  <vscale x 4 x i1> poison,  <vscale x 4 x i32> zeroinitializer
   call void @llvm.masked.store.nxv4i32(<vscale x 4 x i32> %val, ptr %a, i32 4, <vscale x 4 x i1> %mask)
-  call void @llvm.masked.store.nxv4i32(<vscale x 4 x i32> %val, ptr %a, i32 4, <vscale x 4 x i1> %alltrue)
+  call void @llvm.masked.store.nxv4i32(<vscale x 4 x i32> %val, ptr %a, i32 4, <vscale x 4 x i1> splat(i1 true))
   ret void
 }
 
@@ -30,10 +28,8 @@ define void @dead_masked_store_alltrue_bigger(<vscale x 4 x i16> %val, <vscale x
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    st1w { z1.s }, p0, [x0]
 ; CHECK-NEXT:    ret
-  %alltrue.ins = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %alltrue = shufflevector  <vscale x 4 x i1> %alltrue.ins,  <vscale x 4 x i1> poison,  <vscale x 4 x i32> zeroinitializer
   call void @llvm.masked.store.nxv4i16(<vscale x 4 x i16> %val, ptr %a, i32 4, <vscale x 4 x i1> %mask)
-  call void @llvm.masked.store.nxv4i32(<vscale x 4 x i32> %val1, ptr %a, i32 4, <vscale x 4 x i1> %alltrue)
+  call void @llvm.masked.store.nxv4i32(<vscale x 4 x i32> %val1, ptr %a, i32 4, <vscale x 4 x i1> splat(i1 true))
   ret void
 }
 
@@ -44,10 +40,8 @@ define void @dead_masked_store_alltrue_smaller(<vscale x 4 x i32> %val, <vscale
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x0]
 ; CHECK-NEXT:    st1h { z1.s }, p1, [x0]
 ; CHECK-NEXT:    ret
-  %alltrue.ins = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %alltrue = shufflevector  <vscale x 4 x i1> %alltrue.ins,  <vscale x 4 x i1> poison,  <vscale x 4 x i32> zeroinitializer
   call void @llvm.masked.store.nxv4i32(<vscale x 4 x i32> %val, ptr %a, i32 4, <vscale x 4 x i1> %mask)
-  call void @llvm.masked.store.nxv4i16(<vscale x 4 x i16> %val1, ptr %a, i32 4, <vscale x 4 x i1> %alltrue)
+  call void @llvm.masked.store.nxv4i16(<vscale x 4 x i16> %val1, ptr %a, i32 4, <vscale x 4 x i1> splat(i1 true))
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AArch64/sve-extract-fixed-vector.ll b/llvm/test/CodeGen/AArch64/sve-extract-fixed-vector.ll
index b05b46a75b698d..678afc4dea3092 100644
--- a/llvm/test/CodeGen/AArch64/sve-extract-fixed-vector.ll
+++ b/llvm/test/CodeGen/AArch64/sve-extract-fixed-vector.ll
@@ -392,9 +392,7 @@ define <2 x float> @extract_v2f32_nxv4f32_splat_const() {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fmov v0.2s, #1.00000000
 ; CHECK-NEXT:    ret
-  %ins = insertelement <vscale x 4 x float> poison, float 1.0, i32 0
-  %splat = shufflevector <vscale x 4 x float> %ins, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %ext = call <2 x float> @llvm.vector.extract.v2f32.nxv4f32(<vscale x 4 x float> %splat, i64 0)
+  %ext = call <2 x float> @llvm.vector.extract.v2f32.nxv4f32(<vscale x 4 x float> splat(float 1.0), i64 0)
   ret <2 x float> %ext
 }
 
@@ -403,9 +401,7 @@ define <4 x i32> @extract_v4i32_nxv8i32_splat_const() {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    movi v0.4s, #1
 ; CHECK-NEXT:    ret
-  %ins = insertelement <vscale x 8 x i32> poison, i32 1, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %ins, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %ext = call <4 x i32> @llvm.vector.extract.v4i32.nxv8i32(<vscale x 8 x i32> %splat, i64 0)
+  %ext = call <4 x i32> @llvm.vector.extract.v4i32.nxv8i32(<vscale x 8 x i32> splat(i32 1), i64 0)
   ret <4 x i32> %ext
 }
 

diff --git a/llvm/test/CodeGen/AArch64/sve-extract-scalable-vector.ll b/llvm/test/CodeGen/AArch64/sve-extract-scalable-vector.ll
index 3c0bd501f45d8b..cbede1bf8bb74b 100644
--- a/llvm/test/CodeGen/AArch64/sve-extract-scalable-vector.ll
+++ b/llvm/test/CodeGen/AArch64/sve-extract-scalable-vector.ll
@@ -1039,9 +1039,7 @@ define <vscale x 2 x float> @extract_nxv2f32_nxv4f32_splat_const() {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fmov z0.s, #1.00000000
 ; CHECK-NEXT:    ret
-  %ins = insertelement <vscale x 4 x float> poison, float 1.0, i32 0
-  %splat = shufflevector <vscale x 4 x float> %ins, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %ext = call <vscale x 2 x float> @llvm.vector.extract.nxv2f32.nxv4f32(<vscale x 4 x float> %splat, i64 0)
+  %ext = call <vscale x 2 x float> @llvm.vector.extract.nxv2f32.nxv4f32(<vscale x 4 x float> splat(float 1.0), i64 0)
   ret <vscale x 2 x float> %ext
 }
 
@@ -1050,9 +1048,7 @@ define <vscale x 4 x i32> @extract_nxv4i32_nxv8i32_splat_const() {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z0.s, #1 // =0x1
 ; CHECK-NEXT:    ret
-  %ins = insertelement <vscale x 8 x i32> poison, i32 1, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %ins, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %ext = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> %splat, i64 0)
+  %ext = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> splat(i32 1), i64 0)
   ret <vscale x 4 x i32> %ext
 }
 
@@ -1061,9 +1057,7 @@ define <vscale x 2 x i1> @extract_nxv2i1_nxv16i1_all_ones() {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ret
-  %ins = insertelement <vscale x 16 x i1> poison, i1 1, i32 0
-  %splat = shufflevector <vscale x 16 x i1> %ins, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %ext = call <vscale x 2 x i1> @llvm.vector.extract.nxv2i1.nxv16i1(<vscale x 16 x i1> %splat, i64 0)
+  %ext = call <vscale x 2 x i1> @llvm.vector.extract.nxv2i1.nxv16i1(<vscale x 16 x i1> splat(i1 true), i64 0)
   ret <vscale x 2 x i1> %ext
 }
 

diff --git a/llvm/test/CodeGen/AArch64/sve-fadda-select.ll b/llvm/test/CodeGen/AArch64/sve-fadda-select.ll
index b4d6aa5e09cd99..e7681cae083720 100644
--- a/llvm/test/CodeGen/AArch64/sve-fadda-select.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fadda-select.ll
@@ -10,9 +10,7 @@ define float @pred_fadda_nxv2f32(float %x, <vscale x 2 x float> %y, <vscale x 2
 ; CHECK-NEXT:    fadda s0, p0, s0, z1.s
 ; CHECK-NEXT:    // kill: def $s0 killed $s0 killed $z0
 ; CHECK-NEXT:    ret
-  %i = insertelement <vscale x 2 x float> poison, float -0.000000e+00, i32 0
-  %minus0 = shufflevector <vscale x 2 x float> %i, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
-  %sel = select <vscale x 2 x i1> %mask, <vscale x 2 x float> %y, <vscale x 2 x float> %minus0
+  %sel = select <vscale x 2 x i1> %mask, <vscale x 2 x float> %y, <vscale x 2 x float> splat(float -0.000000e+00)
   %fadda = call float @llvm.vector.reduce.fadd.nxv2f32(float %x, <vscale x 2 x float> %sel)
   ret float %fadda
 }
@@ -24,9 +22,7 @@ define float @pred_fadda_nxv4f32(float %x, <vscale x 4 x float> %y, <vscale x 4
 ; CHECK-NEXT:    fadda s0, p0, s0, z1.s
 ; CHECK-NEXT:    // kill: def $s0 killed $s0 killed $z0
 ; CHECK-NEXT:    ret
-  %i = insertelement <vscale x 4 x float> poison, float -0.000000e+00, i32 0
-  %minus0 = shufflevector <vscale x 4 x float> %i, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %sel = select <vscale x 4 x i1> %mask, <vscale x 4 x float> %y, <vscale x 4 x float> %minus0
+  %sel = select <vscale x 4 x i1> %mask, <vscale x 4 x float> %y, <vscale x 4 x float> splat(float -0.000000e+00)
   %fadda = call float @llvm.vector.reduce.fadd.nxv4f32(float %x, <vscale x 4 x float> %sel)
   ret float %fadda
 }
@@ -38,9 +34,7 @@ define double @pred_fadda_nxv2f64(double %x, <vscale x 2 x double> %y, <vscale x
 ; CHECK-NEXT:    fadda d0, p0, d0, z1.d
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
-  %i = insertelement <vscale x 2 x double> poison, double -0.000000e+00, i32 0
-  %minus0 = shufflevector <vscale x 2 x double> %i, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
-  %sel = select <vscale x 2 x i1> %mask, <vscale x 2 x double> %y, <vscale x 2 x double> %minus0
+  %sel = select <vscale x 2 x i1> %mask, <vscale x 2 x double> %y, <vscale x 2 x double> splat(double -0.000000e+00)
   %fadda = call double @llvm.vector.reduce.fadd.nxv2f64(double %x, <vscale x 2 x double> %sel)
   ret double %fadda
 }
@@ -52,9 +46,7 @@ define half @pred_fadda_nxv2f16(half %x, <vscale x 2 x half> %y, <vscale x 2 x i
 ; CHECK-NEXT:    fadda h0, p0, h0, z1.h
 ; CHECK-NEXT:    // kill: def $h0 killed $h0 killed $z0
 ; CHECK-NEXT:    ret
-  %i = insertelement <vscale x 2 x half> poison, half -0.000000e+00, i32 0
-  %minus0 = shufflevector <vscale x 2 x half> %i, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
-  %sel = select <vscale x 2 x i1> %mask, <vscale x 2 x half> %y, <vscale x 2 x half> %minus0
+  %sel = select <vscale x 2 x i1> %mask, <vscale x 2 x half> %y, <vscale x 2 x half> splat(half -0.000000e+00)
   %fadda = call half @llvm.vector.reduce.fadd.nxv2f16(half %x, <vscale x 2 x half> %sel)
   ret half %fadda
 }
@@ -66,9 +58,7 @@ define half @pred_fadda_nxv4f16(half %x, <vscale x 4 x half> %y, <vscale x 4 x i
 ; CHECK-NEXT:    fadda h0, p0, h0, z1.h
 ; CHECK-NEXT:    // kill: def $h0 killed $h0 killed $z0
 ; CHECK-NEXT:    ret
-  %i = insertelement <vscale x 4 x half> poison, half -0.000000e+00, i32 0
-  %minus0 = shufflevector <vscale x 4 x half> %i, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
-  %sel = select <vscale x 4 x i1> %mask, <vscale x 4 x half> %y, <vscale x 4 x half> %minus0
+  %sel = select <vscale x 4 x i1> %mask, <vscale x 4 x half> %y, <vscale x 4 x half> splat(half -0.000000e+00)
   %fadda = call half @llvm.vector.reduce.fadd.nxv4f16(half %x, <vscale x 4 x half> %sel)
   ret half %fadda
 }
@@ -80,9 +70,7 @@ define half @pred_fadda_nxv8f16(half %x, <vscale x 8 x half> %y, <vscale x 8 x i
 ; CHECK-NEXT:    fadda h0, p0, h0, z1.h
 ; CHECK-NEXT:    // kill: def $h0 killed $h0 killed $z0
 ; CHECK-NEXT:    ret
-  %i = insertelement <vscale x 8 x half> poison, half -0.000000e+00, i32 0
-  %minus0 = shufflevector <vscale x 8 x half> %i, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %sel = select <vscale x 8 x i1> %mask, <vscale x 8 x half> %y, <vscale x 8 x half> %minus0
+  %sel = select <vscale x 8 x i1> %mask, <vscale x 8 x half> %y, <vscale x 8 x half> splat(half -0.000000e+00)
   %fadda = call half @llvm.vector.reduce.fadd.nxv8f16(half %x, <vscale x 8 x half> %sel)
   ret half %fadda
 }

diff --git a/llvm/test/CodeGen/AArch64/sve-fold-loadext-and-splat-vector.ll b/llvm/test/CodeGen/AArch64/sve-fold-loadext-and-splat-vector.ll
index e532743a8658b7..42ec96ad60498b 100644
--- a/llvm/test/CodeGen/AArch64/sve-fold-loadext-and-splat-vector.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fold-loadext-and-splat-vector.ll
@@ -13,9 +13,7 @@ define <vscale x 2 x i64> @fold_loadext_and(ptr %ptr, i32 %needle, <vscale x 2 x
 ; CHECK-NEXT:    ret
   %load = load <vscale x 2 x i32>, ptr %ptr, align 4
   %ext = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
-  %splatinsert = insertelement <vscale x 2 x i64> poison, i64 4294967295, i64 0
-  %splat = shufflevector <vscale x 2 x i64> %splatinsert, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %and = and <vscale x 2 x i64> %ext, %splat
+  %and = and <vscale x 2 x i64> %ext, splat(i64 4294967295)
   ret <vscale x 2 x i64> %and
 }
 

diff --git a/llvm/test/CodeGen/AArch64/sve-fp-immediates-merging.ll b/llvm/test/CodeGen/AArch64/sve-fp-immediates-merging.ll
index ef62ffd58bd5d0..e1d883b0e78997 100644
--- a/llvm/test/CodeGen/AArch64/sve-fp-immediates-merging.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fp-immediates-merging.ll
@@ -13,9 +13,7 @@ define <vscale x 8 x half> @fadd_h_immhalf(<vscale x 8 x half> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    fadd z0.h, p0/m, z0.h, #0.5
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x half> undef, half 0.500000e+00, i32 0
-  %splat = shufflevector <vscale x 8 x half> %elt, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
-  %out = fadd <vscale x 8 x half> %a, %splat
+  %out = fadd <vscale x 8 x half> %a, splat(half 0.500000e+00)
   ret <vscale x 8 x half> %out
 }
 
@@ -25,9 +23,7 @@ define <vscale x 8 x half> @fadd_h_immone(<vscale x 8 x half> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    fadd z0.h, p0/m, z0.h, #1.0
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 8 x half> undef, half 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 8 x half> %elt, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
-  %out = fadd <vscale x 8 x half> %a, %splat
+  %out = fadd <vscale x 8 x half> %a, splat(half 1.000000e+00)
   ret <vscale x 8 x half> %out
 }
 
@@ -37,9 +33,7 @@ define <vscale x 4 x half> @fadd_4h_immhalf(<vscale x 4 x half> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    fadd z0.h, p0/m, z0.h, #0.5
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x half> undef, half 0.500000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x half> %elt, <vscale x 4 x half> undef, <vscale x 4 x i32> zeroinitializer
-  %out = fadd <vscale x 4 x half> %a, %splat
+  %out = fadd <vscale x 4 x half> %a, splat(half 0.500000e+00)
   ret <vscale x 4 x half> %out
 }
 
@@ -49,9 +43,7 @@ define <vscale x 4 x half> @fadd_4h_immone(<vscale x 4 x half> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    fadd z0.h, p0/m, z0.h, #1.0
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 4 x half> undef, half 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x half> %elt, <vscale x 4 x half> undef, <vscale x 4 x i32> zeroinitializer
-  %out = fadd <vscale x 4 x half> %a, %splat
+  %out = fadd <vscale x 4 x half> %a, splat(half 1.000000e+00)
   ret <vscale x 4 x half> %out
 }
 
@@ -61,9 +53,7 @@ define <vscale x 2 x half> @fadd_2h_immhalf(<vscale x 2 x half> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fadd z0.h, p0/m, z0.h, #0.5
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x half> undef, half 0.500000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x half> %elt, <vscale x 2 x half> undef, <vscale x 2 x i32> zeroinitializer
-  %out = fadd <vscale x 2 x half> %a, %splat
+  %out = fadd <vscale x 2 x half> %a, splat(half 0.500000e+00)
   ret <vscale x 2 x half> %out
 }
 
@@ -73,9 +63,7 @@ define <vscale x 2 x half> @fadd_2h_immone(<vscale x 2 x half> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fadd z0.h, p0/m, z0.h, #1.0
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 2 x half> undef, half 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x half> %elt, <vscale x 2 x half> undef, <vscale x 2 x i32> zeroinitializer
-  %out = fadd <vscale x 2 x half> %a, %splat
+  %out = fadd <vscale x 2 x half> %a, splat(half 1.000000e+00)
   ret <vscale x 2 x half> %out
 }
 
@@ -85,9 +73,7 @@ define <vscale x 4 x float> @fadd_s_immhalf(<vscale x 4 x float> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    fadd z0.s, p0/m, z0.s, #0.5
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 4 x float> undef, float 0.500000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x float> %elt, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
-  %out = fadd <vscale x 4 x float> %a, %splat
+  %out = fadd <vscale x 4 x float> %a, splat(float 0.500000e+00)
   ret <vscale x 4 x float> %out
 }
 
@@ -97,9 +83,7 @@ define <vscale x 4 x float> @fadd_s_immone(<vscale x 4 x float> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    fadd z0.s, p0/m, z0.s, #1.0
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 4 x float> undef, float 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x float> %elt, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
-  %out = fadd <vscale x 4 x float> %a, %splat
+  %out = fadd <vscale x 4 x float> %a, splat(float 1.000000e+00)
   ret <vscale x 4 x float> %out
 }
 
@@ -109,9 +93,7 @@ define <vscale x 2 x float> @fadd_2s_immhalf(<vscale x 2 x float> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fadd z0.s, p0/m, z0.s, #0.5
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 2 x float> undef, float 0.500000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x float> %elt, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer
-  %out = fadd <vscale x 2 x float> %a, %splat
+  %out = fadd <vscale x 2 x float> %a, splat(float 0.500000e+00)
   ret <vscale x 2 x float> %out
 }
 
@@ -121,9 +103,7 @@ define <vscale x 2 x float> @fadd_2s_immone(<vscale x 2 x float> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fadd z0.s, p0/m, z0.s, #1.0
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 2 x float> undef, float 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x float> %elt, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer
-  %out = fadd <vscale x 2 x float> %a, %splat
+  %out = fadd <vscale x 2 x float> %a, splat(float 1.000000e+00)
   ret <vscale x 2 x float> %out
 }
 
@@ -134,9 +114,7 @@ define <vscale x 2 x double> @fadd_d_immhalf(<vscale x 2 x double> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fadd z0.d, p0/m, z0.d, #0.5
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 2 x double> undef, double 0.500000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x double> %elt, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
-  %out = fadd <vscale x 2 x double> %a, %splat
+  %out = fadd <vscale x 2 x double> %a, splat(double 0.500000e+00)
   ret <vscale x 2 x double> %out
 }
 
@@ -146,9 +124,7 @@ define <vscale x 2 x double> @fadd_d_immone(<vscale x 2 x double> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fadd z0.d, p0/m, z0.d, #1.0
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 2 x double> undef, double 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x double> %elt, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
-  %out = fadd <vscale x 2 x double> %a, %splat
+  %out = fadd <vscale x 2 x double> %a, splat(double 1.000000e+00)
   ret <vscale x 2 x double> %out
 }
 
@@ -162,9 +138,7 @@ define <vscale x 8 x half> @fmax_h_immzero(<vscale x 8 x half> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    fmax z0.h, p0/m, z0.h, #0.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x half> undef, half 0.000000e+00, i32 0
-  %splat = shufflevector <vscale x 8 x half> %elt, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
-  %out = call <vscale x 8 x half> @llvm.maximum.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %splat)
+  %out = call <vscale x 8 x half> @llvm.maximum.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer)
   ret <vscale x 8 x half> %out
 }
 
@@ -174,9 +148,7 @@ define <vscale x 8 x half> @fmax_h_immone(<vscale x 8 x half> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    fmax z0.h, p0/m, z0.h, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x half> undef, half 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 8 x half> %elt, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
-  %out = call <vscale x 8 x half> @llvm.maximum.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %splat)
+  %out = call <vscale x 8 x half> @llvm.maximum.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> splat(half 1.000000e+00))
   ret <vscale x 8 x half> %out
 }
 
@@ -186,9 +158,7 @@ define <vscale x 4 x half> @fmax_4h_immzero(<vscale x 4 x half> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    fmax z0.h, p0/m, z0.h, #0.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x half> undef, half 0.000000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x half> %elt, <vscale x 4 x half> undef, <vscale x 4 x i32> zeroinitializer
-  %out = call <vscale x 4 x half> @llvm.maximum.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %splat)
+  %out = call <vscale x 4 x half> @llvm.maximum.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> zeroinitializer)
   ret <vscale x 4 x half> %out
 }
 
@@ -198,9 +168,7 @@ define <vscale x 4 x half> @fmax_4h_immone(<vscale x 4 x half> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    fmax z0.h, p0/m, z0.h, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x half> undef, half 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x half> %elt, <vscale x 4 x half> undef, <vscale x 4 x i32> zeroinitializer
-  %out = call <vscale x 4 x half> @llvm.maximum.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %splat)
+  %out = call <vscale x 4 x half> @llvm.maximum.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> splat(half 1.000000e+00))
   ret <vscale x 4 x half> %out
 }
 
@@ -210,9 +178,7 @@ define <vscale x 2 x half> @fmax_2h_immzero(<vscale x 2 x half> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fmax z0.h, p0/m, z0.h, #0.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x half> undef, half 0.000000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x half> %elt, <vscale x 2 x half> undef, <vscale x 2 x i32> zeroinitializer
-  %out = call <vscale x 2 x half> @llvm.maximum.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %splat)
+  %out = call <vscale x 2 x half> @llvm.maximum.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> zeroinitializer)
   ret <vscale x 2 x half> %out
 }
 
@@ -222,9 +188,7 @@ define <vscale x 2 x half> @fmax_2h_immone(<vscale x 2 x half> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fmax z0.h, p0/m, z0.h, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x half> undef, half 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x half> %elt, <vscale x 2 x half> undef, <vscale x 2 x i32> zeroinitializer
-  %out = call <vscale x 2 x half> @llvm.maximum.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %splat)
+  %out = call <vscale x 2 x half> @llvm.maximum.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> splat(half 1.000000e+00))
   ret <vscale x 2 x half> %out
 }
 
@@ -234,9 +198,7 @@ define <vscale x 4 x float> @fmax_s_immzero(<vscale x 4 x float> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    fmax z0.s, p0/m, z0.s, #0.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x float> undef, float 0.000000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x float> %elt, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
-  %out = call <vscale x 4 x float> @llvm.maximum.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %splat)
+  %out = call <vscale x 4 x float> @llvm.maximum.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer)
   ret <vscale x 4 x float> %out
 }
 
@@ -246,9 +208,7 @@ define <vscale x 4 x float> @fmax_s_immone(<vscale x 4 x float> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    fmax z0.s, p0/m, z0.s, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x float> undef, float 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x float> %elt, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
-  %out = call <vscale x 4 x float> @llvm.maximum.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %splat)
+  %out = call <vscale x 4 x float> @llvm.maximum.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> splat(float 1.000000e+00))
   ret <vscale x 4 x float> %out
 }
 
@@ -258,9 +218,7 @@ define <vscale x 2 x float> @fmax_2s_immzero(<vscale x 2 x float> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fmax z0.s, p0/m, z0.s, #0.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x float> undef, float 0.000000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x float> %elt, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer
-  %out = call <vscale x 2 x float> @llvm.maximum.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %splat)
+  %out = call <vscale x 2 x float> @llvm.maximum.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> zeroinitializer)
   ret <vscale x 2 x float> %out
 }
 
@@ -270,9 +228,7 @@ define <vscale x 2 x float> @fmax_2s_immone(<vscale x 2 x float> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fmax z0.s, p0/m, z0.s, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x float> undef, float 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x float> %elt, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer
-  %out = call <vscale x 2 x float> @llvm.maximum.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %splat)
+  %out = call <vscale x 2 x float> @llvm.maximum.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> splat(float 1.000000e+00))
   ret <vscale x 2 x float> %out
 }
 
@@ -282,9 +238,7 @@ define <vscale x 2 x double> @fmax_d_immzero(<vscale x 2 x double> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fmax z0.d, p0/m, z0.d, #0.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x double> undef, double 0.000000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x double> %elt, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
-  %out = call <vscale x 2 x double> @llvm.maximum.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %splat)
+  %out = call <vscale x 2 x double> @llvm.maximum.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer)
   ret <vscale x 2 x double> %out
 }
 
@@ -294,9 +248,7 @@ define <vscale x 2 x double> @fmax_d_immone(<vscale x 2 x double> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fmax z0.d, p0/m, z0.d, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x double> undef, double 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x double> %elt, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
-  %out = call <vscale x 2 x double> @llvm.maximum.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %splat)
+  %out = call <vscale x 2 x double> @llvm.maximum.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> splat(double 1.000000e+00))
   ret <vscale x 2 x double> %out
 }
 
@@ -310,9 +262,7 @@ define <vscale x 8 x half> @fmaxnm_h_immzero(<vscale x 8 x half> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    fmaxnm z0.h, p0/m, z0.h, #0.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x half> undef, half 0.000000e+00, i32 0
-  %splat = shufflevector <vscale x 8 x half> %elt, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
-  %out = call <vscale x 8 x half> @llvm.maxnum.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %splat)
+  %out = call <vscale x 8 x half> @llvm.maxnum.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer)
   ret <vscale x 8 x half> %out
 }
 
@@ -322,9 +272,7 @@ define <vscale x 8 x half> @fmaxnm_h_immone(<vscale x 8 x half> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    fmaxnm z0.h, p0/m, z0.h, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x half> undef, half 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 8 x half> %elt, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
-  %out = call <vscale x 8 x half> @llvm.maxnum.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %splat)
+  %out = call <vscale x 8 x half> @llvm.maxnum.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> splat(half 1.000000e+00))
   ret <vscale x 8 x half> %out
 }
 
@@ -334,9 +282,7 @@ define <vscale x 4 x half> @fmaxnm_4h_immzero(<vscale x 4 x half> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    fmaxnm z0.h, p0/m, z0.h, #0.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x half> undef, half 0.000000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x half> %elt, <vscale x 4 x half> undef, <vscale x 4 x i32> zeroinitializer
-  %out = call <vscale x 4 x half> @llvm.maxnum.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %splat)
+  %out = call <vscale x 4 x half> @llvm.maxnum.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> zeroinitializer)
   ret <vscale x 4 x half> %out
 }
 
@@ -346,9 +292,7 @@ define <vscale x 4 x half> @fmaxnm_4h_immone(<vscale x 4 x half> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    fmaxnm z0.h, p0/m, z0.h, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x half> undef, half 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x half> %elt, <vscale x 4 x half> undef, <vscale x 4 x i32> zeroinitializer
-  %out = call <vscale x 4 x half> @llvm.maxnum.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %splat)
+  %out = call <vscale x 4 x half> @llvm.maxnum.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> splat(half 1.000000e+00))
   ret <vscale x 4 x half> %out
 }
 
@@ -358,9 +302,7 @@ define <vscale x 2 x half> @fmaxnm_2h_immzero(<vscale x 2 x half> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fmaxnm z0.h, p0/m, z0.h, #0.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x half> undef, half 0.000000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x half> %elt, <vscale x 2 x half> undef, <vscale x 2 x i32> zeroinitializer
-  %out = call <vscale x 2 x half> @llvm.maxnum.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %splat)
+  %out = call <vscale x 2 x half> @llvm.maxnum.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> zeroinitializer)
   ret <vscale x 2 x half> %out
 }
 
@@ -370,9 +312,7 @@ define <vscale x 2 x half> @fmaxnm_2h_immone(<vscale x 2 x half> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fmaxnm z0.h, p0/m, z0.h, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x half> undef, half 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x half> %elt, <vscale x 2 x half> undef, <vscale x 2 x i32> zeroinitializer
-  %out = call <vscale x 2 x half> @llvm.maxnum.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %splat)
+  %out = call <vscale x 2 x half> @llvm.maxnum.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> splat(half 1.000000e+00))
   ret <vscale x 2 x half> %out
 }
 
@@ -382,9 +322,7 @@ define <vscale x 4 x float> @fmaxnm_s_immzero(<vscale x 4 x float> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    fmaxnm z0.s, p0/m, z0.s, #0.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x float> undef, float 0.000000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x float> %elt, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
-  %out = call <vscale x 4 x float> @llvm.maxnum.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %splat)
+  %out = call <vscale x 4 x float> @llvm.maxnum.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer)
   ret <vscale x 4 x float> %out
 }
 
@@ -394,9 +332,7 @@ define <vscale x 4 x float> @fmaxnm_s_immone(<vscale x 4 x float> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    fmaxnm z0.s, p0/m, z0.s, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x float> undef, float 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x float> %elt, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
-  %out = call <vscale x 4 x float> @llvm.maxnum.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %splat)
+  %out = call <vscale x 4 x float> @llvm.maxnum.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> splat(float 1.000000e+00))
   ret <vscale x 4 x float> %out
 }
 
@@ -406,9 +342,7 @@ define <vscale x 2 x float> @fmaxnm_2s_immzero(<vscale x 2 x float> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fmaxnm z0.s, p0/m, z0.s, #0.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x float> undef, float 0.000000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x float> %elt, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer
-  %out = call <vscale x 2 x float> @llvm.maxnum.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %splat)
+  %out = call <vscale x 2 x float> @llvm.maxnum.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> zeroinitializer)
   ret <vscale x 2 x float> %out
 }
 
@@ -418,9 +352,7 @@ define <vscale x 2 x float> @fmaxnm_2s_immone(<vscale x 2 x float> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fmaxnm z0.s, p0/m, z0.s, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x float> undef, float 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x float> %elt, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer
-  %out = call <vscale x 2 x float> @llvm.maxnum.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %splat)
+  %out = call <vscale x 2 x float> @llvm.maxnum.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> splat(float 1.000000e+00))
   ret <vscale x 2 x float> %out
 }
 
@@ -430,9 +362,7 @@ define <vscale x 2 x double> @fmaxnm_d_immzero(<vscale x 2 x double> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fmaxnm z0.d, p0/m, z0.d, #0.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x double> undef, double 0.000000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x double> %elt, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
-  %out = call <vscale x 2 x double> @llvm.maxnum.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %splat)
+  %out = call <vscale x 2 x double> @llvm.maxnum.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer)
   ret <vscale x 2 x double> %out
 }
 
@@ -442,9 +372,7 @@ define <vscale x 2 x double> @fmaxnm_d_immone(<vscale x 2 x double> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fmaxnm z0.d, p0/m, z0.d, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x double> undef, double 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x double> %elt, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
-  %out = call <vscale x 2 x double> @llvm.maxnum.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %splat)
+  %out = call <vscale x 2 x double> @llvm.maxnum.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> splat(double 1.000000e+00))
   ret <vscale x 2 x double> %out
 }
 
@@ -458,9 +386,7 @@ define <vscale x 8 x half> @fmin_h_immzero(<vscale x 8 x half> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    fmin z0.h, p0/m, z0.h, #0.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x half> undef, half 0.000000e+00, i32 0
-  %splat = shufflevector <vscale x 8 x half> %elt, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
-  %out = call <vscale x 8 x half> @llvm.minimum.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %splat)
+  %out = call <vscale x 8 x half> @llvm.minimum.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer)
   ret <vscale x 8 x half> %out
 }
 
@@ -470,9 +396,7 @@ define <vscale x 8 x half> @fmin_h_immone(<vscale x 8 x half> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    fmin z0.h, p0/m, z0.h, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x half> undef, half 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 8 x half> %elt, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
-  %out = call <vscale x 8 x half> @llvm.minimum.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %splat)
+  %out = call <vscale x 8 x half> @llvm.minimum.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> splat(half 1.000000e+00))
   ret <vscale x 8 x half> %out
 }
 
@@ -482,9 +406,7 @@ define <vscale x 4 x half> @fmin_4h_immzero(<vscale x 4 x half> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    fmin z0.h, p0/m, z0.h, #0.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x half> undef, half 0.000000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x half> %elt, <vscale x 4 x half> undef, <vscale x 4 x i32> zeroinitializer
-  %out = call <vscale x 4 x half> @llvm.minimum.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %splat)
+  %out = call <vscale x 4 x half> @llvm.minimum.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> zeroinitializer)
   ret <vscale x 4 x half> %out
 }
 
@@ -494,9 +416,7 @@ define <vscale x 4 x half> @fmin_4h_immone(<vscale x 4 x half> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    fmin z0.h, p0/m, z0.h, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x half> undef, half 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x half> %elt, <vscale x 4 x half> undef, <vscale x 4 x i32> zeroinitializer
-  %out = call <vscale x 4 x half> @llvm.minimum.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %splat)
+  %out = call <vscale x 4 x half> @llvm.minimum.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> splat(half 1.000000e+00))
   ret <vscale x 4 x half> %out
 }
 
@@ -506,9 +426,7 @@ define <vscale x 2 x half> @fmin_2h_immzero(<vscale x 2 x half> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fmin z0.h, p0/m, z0.h, #0.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x half> undef, half 0.000000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x half> %elt, <vscale x 2 x half> undef, <vscale x 2 x i32> zeroinitializer
-  %out = call <vscale x 2 x half> @llvm.minimum.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %splat)
+  %out = call <vscale x 2 x half> @llvm.minimum.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> zeroinitializer)
   ret <vscale x 2 x half> %out
 }
 
@@ -518,9 +436,7 @@ define <vscale x 2 x half> @fmin_2h_immone(<vscale x 2 x half> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fmin z0.h, p0/m, z0.h, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x half> undef, half 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x half> %elt, <vscale x 2 x half> undef, <vscale x 2 x i32> zeroinitializer
-  %out = call <vscale x 2 x half> @llvm.minimum.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %splat)
+  %out = call <vscale x 2 x half> @llvm.minimum.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> splat(half 1.000000e+00))
   ret <vscale x 2 x half> %out
 }
 
@@ -530,9 +446,7 @@ define <vscale x 4 x float> @fmin_s_immzero(<vscale x 4 x float> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    fmin z0.s, p0/m, z0.s, #0.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x float> undef, float 0.000000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x float> %elt, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
-  %out = call <vscale x 4 x float> @llvm.minimum.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %splat)
+  %out = call <vscale x 4 x float> @llvm.minimum.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer)
   ret <vscale x 4 x float> %out
 }
 
@@ -542,9 +456,7 @@ define <vscale x 4 x float> @fmin_s_immone(<vscale x 4 x float> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    fmin z0.s, p0/m, z0.s, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x float> undef, float 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x float> %elt, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
-  %out = call <vscale x 4 x float> @llvm.minimum.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %splat)
+  %out = call <vscale x 4 x float> @llvm.minimum.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> splat(float 1.000000e+00))
   ret <vscale x 4 x float> %out
 }
 
@@ -554,9 +466,7 @@ define <vscale x 2 x float> @fmin_2s_immzero(<vscale x 2 x float> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fmin z0.s, p0/m, z0.s, #0.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x float> undef, float 0.000000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x float> %elt, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer
-  %out = call <vscale x 2 x float> @llvm.minimum.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %splat)
+  %out = call <vscale x 2 x float> @llvm.minimum.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> zeroinitializer)
   ret <vscale x 2 x float> %out
 }
 
@@ -566,9 +476,7 @@ define <vscale x 2 x float> @fmin_2s_immone(<vscale x 2 x float> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fmin z0.s, p0/m, z0.s, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x float> undef, float 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x float> %elt, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer
-  %out = call <vscale x 2 x float> @llvm.minimum.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %splat)
+  %out = call <vscale x 2 x float> @llvm.minimum.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> splat(float 1.000000e+00))
   ret <vscale x 2 x float> %out
 }
 
@@ -578,9 +486,7 @@ define <vscale x 2 x double> @fmin_d_immzero(<vscale x 2 x double> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fmin z0.d, p0/m, z0.d, #0.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x double> undef, double 0.000000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x double> %elt, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
-  %out = call <vscale x 2 x double> @llvm.minimum.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %splat)
+  %out = call <vscale x 2 x double> @llvm.minimum.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer)
   ret <vscale x 2 x double> %out
 }
 
@@ -590,9 +496,7 @@ define <vscale x 2 x double> @fmin_d_immone(<vscale x 2 x double> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fmin z0.d, p0/m, z0.d, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x double> undef, double 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x double> %elt, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
-  %out = call <vscale x 2 x double> @llvm.minimum.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %splat)
+  %out = call <vscale x 2 x double> @llvm.minimum.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> splat(double 1.000000e+00))
   ret <vscale x 2 x double> %out
 }
 
@@ -606,9 +510,7 @@ define <vscale x 8 x half> @fminnm_h_immzero(<vscale x 8 x half> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    fminnm z0.h, p0/m, z0.h, #0.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x half> undef, half 0.000000e+00, i32 0
-  %splat = shufflevector <vscale x 8 x half> %elt, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
-  %out = call <vscale x 8 x half> @llvm.minnum.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %splat)
+  %out = call <vscale x 8 x half> @llvm.minnum.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer)
   ret <vscale x 8 x half> %out
 }
 
@@ -618,9 +520,7 @@ define <vscale x 8 x half> @fminnm_h_immone(<vscale x 8 x half> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    fminnm z0.h, p0/m, z0.h, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x half> undef, half 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 8 x half> %elt, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
-  %out = call <vscale x 8 x half> @llvm.minnum.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %splat)
+  %out = call <vscale x 8 x half> @llvm.minnum.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> splat(half 1.000000e+00))
   ret <vscale x 8 x half> %out
 }
 
@@ -630,9 +530,7 @@ define <vscale x 4 x half> @fminnm_4h_immzero(<vscale x 4 x half> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    fminnm z0.h, p0/m, z0.h, #0.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x half> undef, half 0.000000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x half> %elt, <vscale x 4 x half> undef, <vscale x 4 x i32> zeroinitializer
-  %out = call <vscale x 4 x half> @llvm.minnum.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %splat)
+  %out = call <vscale x 4 x half> @llvm.minnum.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> zeroinitializer)
   ret <vscale x 4 x half> %out
 }
 
@@ -642,9 +540,7 @@ define <vscale x 4 x half> @fminnm_4h_immone(<vscale x 4 x half> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    fminnm z0.h, p0/m, z0.h, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x half> undef, half 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x half> %elt, <vscale x 4 x half> undef, <vscale x 4 x i32> zeroinitializer
-  %out = call <vscale x 4 x half> @llvm.minnum.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %splat)
+  %out = call <vscale x 4 x half> @llvm.minnum.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> splat(half 1.000000e+00))
   ret <vscale x 4 x half> %out
 }
 
@@ -654,9 +550,7 @@ define <vscale x 2 x half> @fminnm_2h_immzero(<vscale x 2 x half> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fminnm z0.h, p0/m, z0.h, #0.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x half> undef, half 0.000000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x half> %elt, <vscale x 2 x half> undef, <vscale x 2 x i32> zeroinitializer
-  %out = call <vscale x 2 x half> @llvm.minnum.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %splat)
+  %out = call <vscale x 2 x half> @llvm.minnum.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> zeroinitializer)
   ret <vscale x 2 x half> %out
 }
 
@@ -666,9 +560,7 @@ define <vscale x 2 x half> @fminnm_2h_immone(<vscale x 2 x half> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fminnm z0.h, p0/m, z0.h, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x half> undef, half 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x half> %elt, <vscale x 2 x half> undef, <vscale x 2 x i32> zeroinitializer
-  %out = call <vscale x 2 x half> @llvm.minnum.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %splat)
+  %out = call <vscale x 2 x half> @llvm.minnum.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> splat(half 1.000000e+00))
   ret <vscale x 2 x half> %out
 }
 
@@ -678,9 +570,7 @@ define <vscale x 4 x float> @fminnm_s_immzero(<vscale x 4 x float> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    fminnm z0.s, p0/m, z0.s, #0.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x float> undef, float 0.000000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x float> %elt, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
-  %out = call <vscale x 4 x float> @llvm.minnum.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %splat)
+  %out = call <vscale x 4 x float> @llvm.minnum.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer)
   ret <vscale x 4 x float> %out
 }
 
@@ -690,9 +580,7 @@ define <vscale x 4 x float> @fminnm_s_immone(<vscale x 4 x float> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    fminnm z0.s, p0/m, z0.s, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x float> undef, float 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x float> %elt, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
-  %out = call <vscale x 4 x float> @llvm.minnum.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %splat)
+  %out = call <vscale x 4 x float> @llvm.minnum.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> splat(float 1.000000e+00))
   ret <vscale x 4 x float> %out
 }
 
@@ -702,9 +590,7 @@ define <vscale x 2 x float> @fminnm_2s_immzero(<vscale x 2 x float> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fminnm z0.s, p0/m, z0.s, #0.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x float> undef, float 0.000000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x float> %elt, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer
-  %out = call <vscale x 2 x float> @llvm.minnum.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %splat)
+  %out = call <vscale x 2 x float> @llvm.minnum.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> zeroinitializer)
   ret <vscale x 2 x float> %out
 }
 
@@ -714,9 +600,7 @@ define <vscale x 2 x float> @fminnm_2s_immone(<vscale x 2 x float> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fminnm z0.s, p0/m, z0.s, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x float> undef, float 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x float> %elt, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer
-  %out = call <vscale x 2 x float> @llvm.minnum.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %splat)
+  %out = call <vscale x 2 x float> @llvm.minnum.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> splat(float 1.000000e+00))
   ret <vscale x 2 x float> %out
 }
 
@@ -726,9 +610,7 @@ define <vscale x 2 x double> @fminnm_d_immzero(<vscale x 2 x double> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fminnm z0.d, p0/m, z0.d, #0.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x double> undef, double 0.000000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x double> %elt, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
-  %out = call <vscale x 2 x double> @llvm.minnum.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %splat)
+  %out = call <vscale x 2 x double> @llvm.minnum.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer)
   ret <vscale x 2 x double> %out
 }
 
@@ -738,9 +620,7 @@ define <vscale x 2 x double> @fminnm_d_immone(<vscale x 2 x double> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fminnm z0.d, p0/m, z0.d, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x double> undef, double 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x double> %elt, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
-  %out = call <vscale x 2 x double> @llvm.minnum.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %splat)
+  %out = call <vscale x 2 x double> @llvm.minnum.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> splat(double 1.000000e+00))
   ret <vscale x 2 x double> %out
 }
 
@@ -754,9 +634,7 @@ define <vscale x 8 x half> @fmul_h_immhalf(<vscale x 8 x half> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    fmul z0.h, p0/m, z0.h, #0.5
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x half> undef, half 0.500000e+00, i32 0
-  %splat = shufflevector <vscale x 8 x half> %elt, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
-  %out = fmul <vscale x 8 x half> %a, %splat
+  %out = fmul <vscale x 8 x half> %a, splat(half 0.500000e+00)
   ret <vscale x 8 x half> %out
 }
 
@@ -765,9 +643,7 @@ define <vscale x 8 x half> @fmul_h_immtwo(<vscale x 8 x half> %a) #0 {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fadd z0.h, z0.h, z0.h
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x half> undef, half 2.000000e+00, i32 0
-  %splat = shufflevector <vscale x 8 x half> %elt, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
-  %out = fmul <vscale x 8 x half> %a, %splat
+  %out = fmul <vscale x 8 x half> %a, splat(half 2.000000e+00)
   ret <vscale x 8 x half> %out
 }
 
@@ -777,9 +653,7 @@ define <vscale x 4 x half> @fmul_4h_immhalf(<vscale x 4 x half> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    fmul z0.h, p0/m, z0.h, #0.5
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x half> undef, half 0.500000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x half> %elt, <vscale x 4 x half> undef, <vscale x 4 x i32> zeroinitializer
-  %out = fmul <vscale x 4 x half> %a, %splat
+  %out = fmul <vscale x 4 x half> %a, splat(half 0.500000e+00)
   ret <vscale x 4 x half> %out
 }
 
@@ -789,9 +663,7 @@ define <vscale x 4 x half> @fmul_4h_immtwo(<vscale x 4 x half> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    fadd z0.h, p0/m, z0.h, z0.h
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x half> undef, half 2.000000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x half> %elt, <vscale x 4 x half> undef, <vscale x 4 x i32> zeroinitializer
-  %out = fmul <vscale x 4 x half> %a, %splat
+  %out = fmul <vscale x 4 x half> %a, splat(half 2.000000e+00)
   ret <vscale x 4 x half> %out
 }
 
@@ -801,9 +673,7 @@ define <vscale x 2 x half> @fmul_2h_immhalf(<vscale x 2 x half> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fmul z0.h, p0/m, z0.h, #0.5
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x half> undef, half 0.500000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x half> %elt, <vscale x 2 x half> undef, <vscale x 2 x i32> zeroinitializer
-  %out = fmul <vscale x 2 x half> %a, %splat
+  %out = fmul <vscale x 2 x half> %a, splat(half 0.500000e+00)
   ret <vscale x 2 x half> %out
 }
 
@@ -813,9 +683,7 @@ define <vscale x 2 x half> @fmul_2h_immtwo(<vscale x 2 x half> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fadd z0.h, p0/m, z0.h, z0.h
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x half> undef, half 2.000000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x half> %elt, <vscale x 2 x half> undef, <vscale x 2 x i32> zeroinitializer
-  %out = fmul <vscale x 2 x half> %a, %splat
+  %out = fmul <vscale x 2 x half> %a, splat(half 2.000000e+00)
   ret <vscale x 2 x half> %out
 }
 
@@ -825,9 +693,7 @@ define <vscale x 4 x float> @fmul_s_immhalf(<vscale x 4 x float> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    fmul z0.s, p0/m, z0.s, #0.5
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x float> undef, float 0.500000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x float> %elt, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
-  %out = fmul <vscale x 4 x float> %a, %splat
+  %out = fmul <vscale x 4 x float> %a, splat(float 0.500000e+00)
   ret <vscale x 4 x float> %out
 }
 
@@ -836,9 +702,7 @@ define <vscale x 4 x float> @fmul_s_immtwo(<vscale x 4 x float> %a) #0 {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fadd z0.s, z0.s, z0.s
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x float> undef, float 2.000000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x float> %elt, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
-  %out = fmul <vscale x 4 x float> %a, %splat
+  %out = fmul <vscale x 4 x float> %a, splat(float 2.000000e+00)
   ret <vscale x 4 x float> %out
 }
 
@@ -848,9 +712,7 @@ define <vscale x 2 x float> @fmul_2s_immhalf(<vscale x 2 x float> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fmul z0.s, p0/m, z0.s, #0.5
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x float> undef, float 0.500000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x float> %elt, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer
-  %out = fmul <vscale x 2 x float> %a, %splat
+  %out = fmul <vscale x 2 x float> %a, splat(float 0.500000e+00)
   ret <vscale x 2 x float> %out
 }
 
@@ -860,9 +722,7 @@ define <vscale x 2 x float> @fmul_2s_immtwo(<vscale x 2 x float> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fadd z0.s, p0/m, z0.s, z0.s
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x float> undef, float 2.000000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x float> %elt, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer
-  %out = fmul <vscale x 2 x float> %a, %splat
+  %out = fmul <vscale x 2 x float> %a, splat(float 2.000000e+00)
   ret <vscale x 2 x float> %out
 }
 
@@ -872,9 +732,7 @@ define <vscale x 2 x double> @fmul_d_immhalf(<vscale x 2 x double> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fmul z0.d, p0/m, z0.d, #0.5
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x double> undef, double 0.500000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x double> %elt, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
-  %out = fmul <vscale x 2 x double> %a, %splat
+  %out = fmul <vscale x 2 x double> %a, splat(double 0.500000e+00)
   ret <vscale x 2 x double> %out
 }
 
@@ -883,9 +741,7 @@ define <vscale x 2 x double> @fmul_d_immtwo(<vscale x 2 x double> %a) #0 {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fadd z0.d, z0.d, z0.d
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x double> undef, double 2.000000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x double> %elt, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
-  %out = fmul <vscale x 2 x double> %a, %splat
+  %out = fmul <vscale x 2 x double> %a, splat(double 2.000000e+00)
   ret <vscale x 2 x double> %out
 }
 
@@ -899,9 +755,7 @@ define <vscale x 8 x half> @fsub_h_immhalf(<vscale x 8 x half> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    fsub z0.h, p0/m, z0.h, #0.5
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x half> undef, half 0.500000e+00, i32 0
-  %splat = shufflevector <vscale x 8 x half> %elt, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
-  %out = fsub <vscale x 8 x half> %a, %splat
+  %out = fsub <vscale x 8 x half> %a, splat(half 0.500000e+00)
   ret <vscale x 8 x half> %out
 }
 
@@ -911,9 +765,7 @@ define <vscale x 8 x half> @fsub_h_immone(<vscale x 8 x half> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    fsub z0.h, p0/m, z0.h, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x half> undef, half 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 8 x half> %elt, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
-  %out = fsub <vscale x 8 x half> %a, %splat
+  %out = fsub <vscale x 8 x half> %a, splat(half 1.000000e+00)
   ret <vscale x 8 x half> %out
 }
 
@@ -923,9 +775,7 @@ define <vscale x 4 x half> @fsub_4h_immhalf(<vscale x 4 x half> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    fsub z0.h, p0/m, z0.h, #0.5
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x half> undef, half 0.500000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x half> %elt, <vscale x 4 x half> undef, <vscale x 4 x i32> zeroinitializer
-  %out = fsub <vscale x 4 x half> %a, %splat
+  %out = fsub <vscale x 4 x half> %a, splat(half 0.500000e+00)
   ret <vscale x 4 x half> %out
 }
 
@@ -935,9 +785,7 @@ define <vscale x 4 x half> @fsub_4h_immone(<vscale x 4 x half> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    fsub z0.h, p0/m, z0.h, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x half> undef, half 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x half> %elt, <vscale x 4 x half> undef, <vscale x 4 x i32> zeroinitializer
-  %out = fsub <vscale x 4 x half> %a, %splat
+  %out = fsub <vscale x 4 x half> %a, splat(half 1.000000e+00)
   ret <vscale x 4 x half> %out
 }
 
@@ -947,9 +795,7 @@ define <vscale x 2 x half> @fsub_2h_immhalf(<vscale x 2 x half> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fsub z0.h, p0/m, z0.h, #0.5
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x half> undef, half 0.500000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x half> %elt, <vscale x 2 x half> undef, <vscale x 2 x i32> zeroinitializer
-  %out = fsub <vscale x 2 x half> %a, %splat
+  %out = fsub <vscale x 2 x half> %a, splat(half 0.500000e+00)
   ret <vscale x 2 x half> %out
 }
 
@@ -959,9 +805,7 @@ define <vscale x 2 x half> @fsub_2h_immone(<vscale x 2 x half> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fsub z0.h, p0/m, z0.h, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x half> undef, half 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x half> %elt, <vscale x 2 x half> undef, <vscale x 2 x i32> zeroinitializer
-  %out = fsub <vscale x 2 x half> %a, %splat
+  %out = fsub <vscale x 2 x half> %a, splat(half 1.000000e+00)
   ret <vscale x 2 x half> %out
 }
 
@@ -971,9 +815,7 @@ define <vscale x 4 x float> @fsub_s_immhalf(<vscale x 4 x float> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    fsub z0.s, p0/m, z0.s, #0.5
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x float> undef, float 0.500000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x float> %elt, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
-  %out = fsub <vscale x 4 x float> %a, %splat
+  %out = fsub <vscale x 4 x float> %a, splat(float 0.500000e+00)
   ret <vscale x 4 x float> %out
 }
 
@@ -983,9 +825,7 @@ define <vscale x 4 x float> @fsub_s_immone(<vscale x 4 x float> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    fsub z0.s, p0/m, z0.s, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x float> undef, float 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x float> %elt, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
-  %out = fsub <vscale x 4 x float> %a, %splat
+  %out = fsub <vscale x 4 x float> %a, splat(float 1.000000e+00)
   ret <vscale x 4 x float> %out
 }
 
@@ -995,9 +835,7 @@ define <vscale x 2 x float> @fsub_2s_immhalf(<vscale x 2 x float> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fsub z0.s, p0/m, z0.s, #0.5
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x float> undef, float 0.500000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x float> %elt, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer
-  %out = fsub <vscale x 2 x float> %a, %splat
+  %out = fsub <vscale x 2 x float> %a, splat(float 0.500000e+00)
   ret <vscale x 2 x float> %out
 }
 
@@ -1007,9 +845,7 @@ define <vscale x 2 x float> @fsub_2s_immone(<vscale x 2 x float> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fsub z0.s, p0/m, z0.s, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x float> undef, float 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x float> %elt, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer
-  %out = fsub <vscale x 2 x float> %a, %splat
+  %out = fsub <vscale x 2 x float> %a, splat(float 1.000000e+00)
   ret <vscale x 2 x float> %out
 }
 
@@ -1019,9 +855,7 @@ define <vscale x 2 x double> @fsub_d_immhalf(<vscale x 2 x double> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fsub z0.d, p0/m, z0.d, #0.5
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x double> undef, double 0.500000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x double> %elt, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
-  %out = fsub <vscale x 2 x double> %a, %splat
+  %out = fsub <vscale x 2 x double> %a, splat(double 0.500000e+00)
   ret <vscale x 2 x double> %out
 }
 
@@ -1031,9 +865,7 @@ define <vscale x 2 x double> @fsub_d_immone(<vscale x 2 x double> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fsub z0.d, p0/m, z0.d, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x double> undef, double 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x double> %elt, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
-  %out = fsub <vscale x 2 x double> %a, %splat
+  %out = fsub <vscale x 2 x double> %a, splat(double 1.000000e+00)
   ret <vscale x 2 x double> %out
 }
 
@@ -1047,9 +879,7 @@ define <vscale x 8 x half> @fsubr_h_immhalf(<vscale x 8 x half> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    fsubr z0.h, p0/m, z0.h, #0.5
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x half> undef, half 0.500000e+00, i32 0
-  %splat = shufflevector <vscale x 8 x half> %elt, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
-  %out = fsub <vscale x 8 x half> %splat, %a
+  %out = fsub <vscale x 8 x half> splat(half 0.500000e+00), %a
   ret <vscale x 8 x half> %out
 }
 
@@ -1059,9 +889,7 @@ define <vscale x 8 x half> @fsubr_h_immone(<vscale x 8 x half> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    fsubr z0.h, p0/m, z0.h, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x half> undef, half 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 8 x half> %elt, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
-  %out = fsub <vscale x 8 x half> %splat, %a
+  %out = fsub <vscale x 8 x half> splat(half 1.000000e+00), %a
   ret <vscale x 8 x half> %out
 }
 
@@ -1071,9 +899,7 @@ define <vscale x 4 x half> @fsubr_4h_immhalf(<vscale x 4 x half> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    fsubr z0.h, p0/m, z0.h, #0.5
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x half> undef, half 0.500000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x half> %elt, <vscale x 4 x half> undef, <vscale x 4 x i32> zeroinitializer
-  %out = fsub <vscale x 4 x half> %splat, %a
+  %out = fsub <vscale x 4 x half> splat(half 0.500000e+00), %a
   ret <vscale x 4 x half> %out
 }
 
@@ -1083,9 +909,7 @@ define <vscale x 4 x half> @fsubr_4h_immone(<vscale x 4 x half> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    fsubr z0.h, p0/m, z0.h, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x half> undef, half 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x half> %elt, <vscale x 4 x half> undef, <vscale x 4 x i32> zeroinitializer
-  %out = fsub <vscale x 4 x half> %splat, %a
+  %out = fsub <vscale x 4 x half> splat(half 1.000000e+00), %a
   ret <vscale x 4 x half> %out
 }
 
@@ -1095,9 +919,7 @@ define <vscale x 2 x half> @fsubr_2h_immhalf(<vscale x 2 x half> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fsubr z0.h, p0/m, z0.h, #0.5
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x half> undef, half 0.500000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x half> %elt, <vscale x 2 x half> undef, <vscale x 2 x i32> zeroinitializer
-  %out = fsub <vscale x 2 x half> %splat, %a
+  %out = fsub <vscale x 2 x half> splat(half 0.500000e+00), %a
   ret <vscale x 2 x half> %out
 }
 
@@ -1107,9 +929,7 @@ define <vscale x 2 x half> @fsubr_2h_immone(<vscale x 2 x half> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fsubr z0.h, p0/m, z0.h, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x half> undef, half 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x half> %elt, <vscale x 2 x half> undef, <vscale x 2 x i32> zeroinitializer
-  %out = fsub <vscale x 2 x half> %splat, %a
+  %out = fsub <vscale x 2 x half> splat(half 1.000000e+00), %a
   ret <vscale x 2 x half> %out
 }
 
@@ -1119,9 +939,7 @@ define <vscale x 4 x float> @fsubr_s_immhalf(<vscale x 4 x float> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    fsubr z0.s, p0/m, z0.s, #0.5
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x float> undef, float 0.500000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x float> %elt, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
-  %out = fsub <vscale x 4 x float> %splat, %a
+  %out = fsub <vscale x 4 x float> splat(float 0.500000e+00), %a
   ret <vscale x 4 x float> %out
 }
 
@@ -1131,9 +949,7 @@ define <vscale x 4 x float> @fsubr_s_immone(<vscale x 4 x float> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    fsubr z0.s, p0/m, z0.s, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x float> undef, float 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x float> %elt, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
-  %out = fsub <vscale x 4 x float> %splat, %a
+  %out = fsub <vscale x 4 x float> splat(float 1.000000e+00), %a
   ret <vscale x 4 x float> %out
 }
 
@@ -1143,9 +959,7 @@ define <vscale x 2 x float> @fsubr_2s_immhalf(<vscale x 2 x float> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fsubr z0.s, p0/m, z0.s, #0.5
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x float> undef, float 0.500000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x float> %elt, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer
-  %out = fsub <vscale x 2 x float> %splat, %a
+  %out = fsub <vscale x 2 x float> splat(float 0.500000e+00), %a
   ret <vscale x 2 x float> %out
 }
 
@@ -1155,9 +969,7 @@ define <vscale x 2 x float> @fsubr_2s_immone(<vscale x 2 x float> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fsubr z0.s, p0/m, z0.s, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x float> undef, float 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x float> %elt, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer
-  %out = fsub <vscale x 2 x float> %splat, %a
+  %out = fsub <vscale x 2 x float> splat(float 1.000000e+00), %a
   ret <vscale x 2 x float> %out
 }
 
@@ -1167,9 +979,7 @@ define <vscale x 2 x double> @fsubr_d_immhalf(<vscale x 2 x double> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fsubr z0.d, p0/m, z0.d, #0.5
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x double> undef, double 0.500000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x double> %elt, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
-  %out = fsub <vscale x 2 x double> %splat, %a
+  %out = fsub <vscale x 2 x double> splat(double 0.500000e+00), %a
   ret <vscale x 2 x double> %out
 }
 
@@ -1179,14 +989,10 @@ define <vscale x 2 x double> @fsubr_d_immone(<vscale x 2 x double> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fsubr z0.d, p0/m, z0.d, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x double> undef, double 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x double> %elt, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
-  %out = fsub <vscale x 2 x double> %splat, %a
+  %out = fsub <vscale x 2 x double> splat(double 1.000000e+00), %a
   ret <vscale x 2 x double> %out
 }
 
-;; Arithmetic intrinsic declarations
-
 declare <vscale x 8 x half> @llvm.maximum.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>)
 declare <vscale x 4 x half> @llvm.maximum.nxv4f16(<vscale x 4 x half>, <vscale x 4 x half>)
 declare <vscale x 2 x half> @llvm.maximum.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half>)

diff  --git a/llvm/test/CodeGen/AArch64/sve-gather-scatter-addr-opts.ll b/llvm/test/CodeGen/AArch64/sve-gather-scatter-addr-opts.ll
index 3030c38e13bf4f..047716292ac3b9 100644
--- a/llvm/test/CodeGen/AArch64/sve-gather-scatter-addr-opts.ll
+++ b/llvm/test/CodeGen/AArch64/sve-gather-scatter-addr-opts.ll
@@ -13,10 +13,8 @@ define void @scatter_i8_index_offset_maximum(ptr %base, i64 %offset, <vscale x 4
 ; CHECK-NEXT:    ret
   %t0 = insertelement <vscale x 4 x i64> undef, i64 %offset, i32 0
   %t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
-  %t2 = insertelement <vscale x 4 x i64> undef, i64 33554431, i32 0
-  %t3 = shufflevector <vscale x 4 x i64> %t2, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
   %step = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
-  %t4 = mul <vscale x 4 x i64> %t3, %step
+  %t4 = mul <vscale x 4 x i64> splat(i64 33554431), %step
   %t5 = add <vscale x 4 x i64> %t1, %t4
   %t6 = getelementptr i8, ptr %base, <vscale x 4 x i64> %t5
   call void @llvm.masked.scatter.nxv4i8(<vscale x 4 x i8> %data, <vscale x 4 x ptr> %t6, i32 2, <vscale x 4 x i1> %pg)
@@ -34,10 +32,8 @@ define void @scatter_i16_index_offset_minimum(ptr %base, i64 %offset, <vscale x
 ; CHECK-NEXT:    ret
   %t0 = insertelement <vscale x 4 x i64> undef, i64 %offset, i32 0
   %t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
-  %t2 = insertelement <vscale x 4 x i64> undef, i64 -33554432, i32 0
-  %t3 = shufflevector <vscale x 4 x i64> %t2, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
   %step = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
-  %t4 = mul <vscale x 4 x i64> %t3, %step
+  %t4 = mul <vscale x 4 x i64> splat(i64 -33554432), %step
   %t5 = add <vscale x 4 x i64> %t1, %t4
   %t6 = getelementptr i16, ptr %base, <vscale x 4 x i64> %t5
   call void @llvm.masked.scatter.nxv4i16(<vscale x 4 x i16> %data, <vscale x 4 x ptr> %t6, i32 2, <vscale x 4 x i1> %pg)
@@ -55,9 +51,7 @@ define <vscale x 4 x i8> @gather_i8_index_offset_8(ptr %base, i64 %offset, <vsca
   %splat.insert0 = insertelement <vscale x 4 x i64> undef, i64 %offset, i32 0
   %splat0 = shufflevector <vscale x 4 x i64> %splat.insert0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
   %step = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
-  %splat.insert1 = insertelement <vscale x 4 x i64> undef, i64 1, i32 0
-  %splat1 = shufflevector <vscale x 4 x i64> %splat.insert1, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
-  %t1 = mul <vscale x 4 x i64> %splat1, %step
+  %t1 = mul <vscale x 4 x i64> splat(i64 1), %step
   %t2 = add <vscale x 4 x i64> %splat0, %t1
   %t3 = getelementptr i8, ptr %base, <vscale x 4 x i64> %t2
   %load = call <vscale x 4 x i8> @llvm.masked.gather.nxv4i8(<vscale x 4 x ptr> %t3, i32 4, <vscale x 4 x i1> %pg, <vscale x 4 x i8> undef)
@@ -117,10 +111,8 @@ define void @scatter_i8_index_offset_maximum_plus_one(ptr %base, i64 %offset, <v
 ; CHECK-NEXT:    ret
   %t0 = insertelement <vscale x 4 x i64> undef, i64 %offset, i32 0
   %t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
-  %t2 = insertelement <vscale x 4 x i64> undef, i64 33554432, i32 0
-  %t3 = shufflevector <vscale x 4 x i64> %t2, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
   %step = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
-  %t4 = mul <vscale x 4 x i64> %t3, %step
+  %t4 = mul <vscale x 4 x i64> splat(i64 33554432), %step
   %t5 = add <vscale x 4 x i64> %t1, %t4
   %t6 = getelementptr i8, ptr %base, <vscale x 4 x i64> %t5
   call void @llvm.masked.scatter.nxv4i8(<vscale x 4 x i8> %data, <vscale x 4 x ptr> %t6, i32 2, <vscale x 4 x i1> %pg)
@@ -148,10 +140,8 @@ define void @scatter_i8_index_offset_minimum_minus_one(ptr %base, i64 %offset, <
 ; CHECK-NEXT:    ret
   %t0 = insertelement <vscale x 4 x i64> undef, i64 %offset, i32 0
   %t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
-  %t2 = insertelement <vscale x 4 x i64> undef, i64 -33554433, i32 0
-  %t3 = shufflevector <vscale x 4 x i64> %t2, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
   %step = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
-  %t4 = mul <vscale x 4 x i64> %t3, %step
+  %t4 = mul <vscale x 4 x i64> splat(i64 -33554433), %step
   %t5 = add <vscale x 4 x i64> %t1, %t4
   %t6 = getelementptr i8, ptr %base, <vscale x 4 x i64> %t5
   call void @llvm.masked.scatter.nxv4i8(<vscale x 4 x i8> %data, <vscale x 4 x ptr> %t6, i32 2, <vscale x 4 x i1> %pg)
@@ -178,10 +168,8 @@ define void @scatter_i8_index_stride_too_big(ptr %base, i64 %offset, <vscale x 4
 ; CHECK-NEXT:    ret
   %t0 = insertelement <vscale x 4 x i64> undef, i64 %offset, i32 0
   %t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
-  %t2 = insertelement <vscale x 4 x i64> undef, i64 4611686018427387904, i32 0
-  %t3 = shufflevector <vscale x 4 x i64> %t2, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
   %step = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
-  %t4 = mul <vscale x 4 x i64> %t3, %step
+  %t4 = mul <vscale x 4 x i64> splat(i64 4611686018427387904), %step
   %t5 = add <vscale x 4 x i64> %t1, %t4
   %t6 = getelementptr i8, ptr %base, <vscale x 4 x i64> %t5
   call void @llvm.masked.scatter.nxv4i8(<vscale x 4 x i8> %data, <vscale x 4 x ptr> %t6, i32 2, <vscale x 4 x i1> %pg)
@@ -310,9 +298,7 @@ define void @scatter_f16_index_add_add_mul(ptr %base, i64 %offset, i64 %offset2,
   %step = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
   %add1 = add <vscale x 4 x i64> %splat.offset, %step
   %add2 = add <vscale x 4 x i64> %add1, %splat.offset2
-  %splat.const8.ins = insertelement <vscale x 4 x i64> undef, i64 8, i32 0
-  %splat.const8 = shufflevector <vscale x 4 x i64> %splat.const8.ins, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
-  %mul = mul <vscale x 4 x i64> %add2, %splat.const8
+  %mul = mul <vscale x 4 x i64> %add2, splat(i64 8)
   %gep = getelementptr [8 x half], ptr %base, <vscale x 4 x i64> %mul
   %gep.bc = bitcast <vscale x 4 x ptr> %gep to <vscale x 4 x ptr>
   call void @llvm.masked.scatter.nxv4f16(<vscale x 4 x half> %data, <vscale x 4 x ptr> %gep.bc, i32 2, <vscale x 4 x i1> %pg)
@@ -350,9 +336,7 @@ define <vscale x 2 x i64> @masked_gather_nxv2i64_null_with__vec_plus_imm_offsets
 ; CHECK-NEXT:    mov w8, #8 // =0x8
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x8, z0.d, lsl #3]
 ; CHECK-NEXT:    ret
-  %scalar_offset.ins = insertelement <vscale x 2 x i64> undef, i64 1, i64 0
-  %scalar_offset.splat = shufflevector <vscale x 2 x i64> %scalar_offset.ins, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %offsets = add <vscale x 2 x i64> %vector_offsets, %scalar_offset.splat
+  %offsets = add <vscale x 2 x i64> %vector_offsets, splat(i64 1)
   %ptrs = getelementptr i64, ptr null, <vscale x 2 x i64> %offsets
   %data = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %pg, <vscale x 2 x i64> undef)
   ret <vscale x 2 x i64> %data
@@ -428,9 +412,7 @@ define void @masked_scatter_nxv2i64_null_with__vec_plus_imm_offsets(<vscale x 2
 ; CHECK-NEXT:    mov w8, #8 // =0x8
 ; CHECK-NEXT:    st1d { z1.d }, p0, [x8, z0.d, lsl #3]
 ; CHECK-NEXT:    ret
-  %scalar_offset.ins = insertelement <vscale x 2 x i64> undef, i64 1, i64 0
-  %scalar_offset.splat = shufflevector <vscale x 2 x i64> %scalar_offset.ins, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %offsets = add <vscale x 2 x i64> %vector_offsets, %scalar_offset.splat
+  %offsets = add <vscale x 2 x i64> %vector_offsets, splat(i64 1)
   %ptrs = getelementptr i64, ptr null, <vscale x 2 x i64> %offsets
   call void @llvm.masked.scatter.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %pg)
   ret void

diff  --git a/llvm/test/CodeGen/AArch64/sve-insert-vector.ll b/llvm/test/CodeGen/AArch64/sve-insert-vector.ll
index 5efe9e2819d5e8..5d1d7cf65c09e4 100644
--- a/llvm/test/CodeGen/AArch64/sve-insert-vector.ll
+++ b/llvm/test/CodeGen/AArch64/sve-insert-vector.ll
@@ -267,9 +267,7 @@ define <vscale x 4 x i32> @insert_nxv1i32_nxv4i32_undef() nounwind {
 ; CHECK-NEXT:    mov z0.s, #1 // =0x1
 ; CHECK-NEXT:    ret
 entry:
-  %0 = insertelement <vscale x 1 x i32> undef, i32 1, i32 0
-  %subvec = shufflevector <vscale x 1 x i32> %0, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
-  %retval = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.nxv1i32(<vscale x 4 x i32> undef, <vscale x 1 x i32> %subvec, i64 0)
+  %retval = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.nxv1i32(<vscale x 4 x i32> undef, <vscale x 1 x i32> splat(i32 1), i64 0)
   ret <vscale x 4 x i32> %retval
 }
 
@@ -279,9 +277,7 @@ define <vscale x 6 x i16> @insert_nxv1i16_nxv6i16_undef() nounwind {
 ; CHECK-NEXT:    mov z0.h, #1 // =0x1
 ; CHECK-NEXT:    ret
 entry:
-  %0 = insertelement <vscale x 1 x i16> undef, i16 1, i32 0
-  %subvec = shufflevector <vscale x 1 x i16> %0, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
-  %retval = call <vscale x 6 x i16> @llvm.vector.insert.nxv6i16.nxv1i16(<vscale x 6 x i16> undef, <vscale x 1 x i16> %subvec, i64 0)
+  %retval = call <vscale x 6 x i16> @llvm.vector.insert.nxv6i16.nxv1i16(<vscale x 6 x i16> undef, <vscale x 1 x i16> splat(i16 1), i64 0)
   ret <vscale x 6 x i16> %retval
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/sve-int-arith-imm.ll b/llvm/test/CodeGen/AArch64/sve-int-arith-imm.ll
index 52bd79e7a7e60d..25b27c47f3d7a5 100644
--- a/llvm/test/CodeGen/AArch64/sve-int-arith-imm.ll
+++ b/llvm/test/CodeGen/AArch64/sve-int-arith-imm.ll
@@ -9,10 +9,8 @@ define <vscale x 16 x i8> @smax_i8_pos(<vscale x 16 x i8> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    smax z0.b, z0.b, #27
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 16 x i8> undef, i8 27, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
-  %cmp = icmp sgt <vscale x 16 x i8> %a, %splat
-  %res = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %a, <vscale x 16 x i8> %splat
+  %cmp = icmp sgt <vscale x 16 x i8> %a, splat(i8 27)
+  %res = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %a, <vscale x 16 x i8> splat(i8 27)
   ret <vscale x 16 x i8> %res
 }
 
@@ -21,10 +19,8 @@ define <vscale x 16 x i8> @smax_i8_neg(<vscale x 16 x i8> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    smax z0.b, z0.b, #-58
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 16 x i8> undef, i8 -58, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
-  %cmp = icmp sgt <vscale x 16 x i8> %a, %splat
-  %res = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %a, <vscale x 16 x i8> %splat
+  %cmp = icmp sgt <vscale x 16 x i8> %a, splat(i8 -58)
+  %res = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %a, <vscale x 16 x i8> splat(i8 -58)
   ret <vscale x 16 x i8> %res
 }
 
@@ -33,10 +29,8 @@ define <vscale x 8 x i16> @smax_i16_pos(<vscale x 8 x i16> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    smax z0.h, z0.h, #27
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 27, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %cmp = icmp sgt <vscale x 8 x i16> %a, %splat
-  %res = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %a, <vscale x 8 x i16> %splat
+  %cmp = icmp sgt <vscale x 8 x i16> %a, splat(i16 27)
+  %res = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %a, <vscale x 8 x i16> splat(i16 27)
   ret <vscale x 8 x i16> %res
 }
 
@@ -45,10 +39,8 @@ define <vscale x 8 x i16> @smax_i16_neg(<vscale x 8 x i16> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    smax z0.h, z0.h, #-58
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 -58, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %cmp = icmp sgt <vscale x 8 x i16> %a, %splat
-  %res = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %a, <vscale x 8 x i16> %splat
+  %cmp = icmp sgt <vscale x 8 x i16> %a, splat(i16 -58)
+  %res = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %a, <vscale x 8 x i16> splat(i16 -58)
   ret <vscale x 8 x i16> %res
 }
 
@@ -59,10 +51,8 @@ define <vscale x 8 x i16> @smax_i16_out_of_range(<vscale x 8 x i16> %a) {
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    smax z0.h, p0/m, z0.h, z1.h
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 257, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %cmp = icmp sgt <vscale x 8 x i16> %a, %splat
-  %res = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %a, <vscale x 8 x i16> %splat
+  %cmp = icmp sgt <vscale x 8 x i16> %a, splat(i16 257)
+  %res = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %a, <vscale x 8 x i16> splat(i16 257)
   ret <vscale x 8 x i16> %res
 }
 
@@ -71,10 +61,8 @@ define <vscale x 4 x i32> @smax_i32_pos(<vscale x 4 x i32> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    smax z0.s, z0.s, #27
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 27, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %cmp = icmp sgt <vscale x 4 x i32> %a, %splat
-  %res = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %a, <vscale x 4 x i32> %splat
+  %cmp = icmp sgt <vscale x 4 x i32> %a, splat(i32 27)
+  %res = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %a, <vscale x 4 x i32> splat(i32 27)
   ret <vscale x 4 x i32> %res
 }
 
@@ -83,10 +71,8 @@ define <vscale x 4 x i32> @smax_i32_neg(<vscale x 4 x i32> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    smax z0.s, z0.s, #-58
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 -58, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %cmp = icmp sgt <vscale x 4 x i32> %a, %splat
-  %res = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %a, <vscale x 4 x i32> %splat
+  %cmp = icmp sgt <vscale x 4 x i32> %a, splat(i32 -58)
+  %res = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %a, <vscale x 4 x i32> splat(i32 -58)
   ret <vscale x 4 x i32> %res
 }
 
@@ -97,10 +83,8 @@ define <vscale x 4 x i32> @smax_i32_out_of_range(<vscale x 4 x i32> %a) {
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    smax z0.s, p0/m, z0.s, z1.s
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 -129, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %cmp = icmp sgt <vscale x 4 x i32> %a, %splat
-  %res = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %a, <vscale x 4 x i32> %splat
+  %cmp = icmp sgt <vscale x 4 x i32> %a, splat(i32 -129)
+  %res = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %a, <vscale x 4 x i32> splat(i32 -129)
   ret <vscale x 4 x i32> %res
 }
 
@@ -109,10 +93,8 @@ define <vscale x 2 x i64> @smax_i64_pos(<vscale x 2 x i64> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    smax z0.d, z0.d, #27
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 27, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %cmp = icmp sgt <vscale x 2 x i64> %a, %splat
-  %res = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %a, <vscale x 2 x i64> %splat
+  %cmp = icmp sgt <vscale x 2 x i64> %a, splat(i64 27)
+  %res = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %a, <vscale x 2 x i64> splat(i64 27)
   ret <vscale x 2 x i64> %res
 }
 
@@ -121,10 +103,8 @@ define <vscale x 2 x i64> @smax_i64_neg(<vscale x 2 x i64> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    smax z0.d, z0.d, #-58
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 -58, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %cmp = icmp sgt <vscale x 2 x i64> %a, %splat
-  %res = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %a, <vscale x 2 x i64> %splat
+  %cmp = icmp sgt <vscale x 2 x i64> %a, splat(i64 -58)
+  %res = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %a, <vscale x 2 x i64> splat(i64 -58)
   ret <vscale x 2 x i64> %res
 }
 
@@ -135,10 +115,8 @@ define <vscale x 2 x i64> @smax_i64_out_of_range(<vscale x 2 x i64> %a) {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    smax z0.d, p0/m, z0.d, z1.d
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 65535, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %cmp = icmp sgt <vscale x 2 x i64> %a, %splat
-  %res = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %a, <vscale x 2 x i64> %splat
+  %cmp = icmp sgt <vscale x 2 x i64> %a, splat(i64 65535)
+  %res = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %a, <vscale x 2 x i64> splat(i64 65535)
   ret <vscale x 2 x i64> %res
 }
 
@@ -150,10 +128,8 @@ define <vscale x 16 x i8> @smin_i8_pos(<vscale x 16 x i8> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    smin z0.b, z0.b, #27
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 16 x i8> undef, i8 27, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
-  %cmp = icmp slt <vscale x 16 x i8> %a, %splat
-  %res = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %a, <vscale x 16 x i8> %splat
+  %cmp = icmp slt <vscale x 16 x i8> %a, splat(i8 27)
+  %res = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %a, <vscale x 16 x i8> splat(i8 27)
   ret <vscale x 16 x i8> %res
 }
 
@@ -162,10 +138,8 @@ define <vscale x 16 x i8> @smin_i8_neg(<vscale x 16 x i8> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    smin z0.b, z0.b, #-58
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 16 x i8> undef, i8 -58, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
-  %cmp = icmp slt <vscale x 16 x i8> %a, %splat
-  %res = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %a, <vscale x 16 x i8> %splat
+  %cmp = icmp slt <vscale x 16 x i8> %a, splat(i8 -58)
+  %res = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %a, <vscale x 16 x i8> splat(i8 -58)
   ret <vscale x 16 x i8> %res
 }
 
@@ -174,10 +148,8 @@ define <vscale x 8 x i16> @smin_i16_pos(<vscale x 8 x i16> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    smin z0.h, z0.h, #27
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 27, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %cmp = icmp slt <vscale x 8 x i16> %a, %splat
-  %res = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %a, <vscale x 8 x i16> %splat
+  %cmp = icmp slt <vscale x 8 x i16> %a, splat(i16 27)
+  %res = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %a, <vscale x 8 x i16> splat(i16 27)
   ret <vscale x 8 x i16> %res
 }
 
@@ -186,10 +158,8 @@ define <vscale x 8 x i16> @smin_i16_neg(<vscale x 8 x i16> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    smin z0.h, z0.h, #-58
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 -58, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %cmp = icmp slt <vscale x 8 x i16> %a, %splat
-  %res = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %a, <vscale x 8 x i16> %splat
+  %cmp = icmp slt <vscale x 8 x i16> %a, splat(i16 -58)
+  %res = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %a, <vscale x 8 x i16> splat(i16 -58)
   ret <vscale x 8 x i16> %res
 }
 
@@ -200,10 +170,8 @@ define <vscale x 8 x i16> @smin_i16_out_of_range(<vscale x 8 x i16> %a) {
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    smin z0.h, p0/m, z0.h, z1.h
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 257, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %cmp = icmp slt <vscale x 8 x i16> %a, %splat
-  %res = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %a, <vscale x 8 x i16> %splat
+  %cmp = icmp slt <vscale x 8 x i16> %a, splat(i16 257)
+  %res = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %a, <vscale x 8 x i16> splat(i16 257)
   ret <vscale x 8 x i16> %res
 }
 
@@ -212,10 +180,8 @@ define <vscale x 4 x i32> @smin_i32_pos(<vscale x 4 x i32> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    smin z0.s, z0.s, #27
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 27, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %cmp = icmp slt <vscale x 4 x i32> %a, %splat
-  %res = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %a, <vscale x 4 x i32> %splat
+  %cmp = icmp slt <vscale x 4 x i32> %a, splat(i32 27)
+  %res = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %a, <vscale x 4 x i32> splat(i32 27)
   ret <vscale x 4 x i32> %res
 }
 
@@ -224,10 +190,8 @@ define <vscale x 4 x i32> @smin_i32_neg(<vscale x 4 x i32> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    smin z0.s, z0.s, #-58
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 -58, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %cmp = icmp slt <vscale x 4 x i32> %a, %splat
-  %res = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %a, <vscale x 4 x i32> %splat
+  %cmp = icmp slt <vscale x 4 x i32> %a, splat(i32 -58)
+  %res = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %a, <vscale x 4 x i32> splat(i32 -58)
   ret <vscale x 4 x i32> %res
 }
 
@@ -238,10 +202,8 @@ define <vscale x 4 x i32> @smin_i32_out_of_range(<vscale x 4 x i32> %a) {
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    smin z0.s, p0/m, z0.s, z1.s
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 -129, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %cmp = icmp slt <vscale x 4 x i32> %a, %splat
-  %res = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %a, <vscale x 4 x i32> %splat
+  %cmp = icmp slt <vscale x 4 x i32> %a, splat(i32 -129)
+  %res = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %a, <vscale x 4 x i32> splat(i32 -129)
   ret <vscale x 4 x i32> %res
 }
 
@@ -250,10 +212,8 @@ define <vscale x 2 x i64> @smin_i64_pos(<vscale x 2 x i64> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    smin z0.d, z0.d, #27
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 27, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %cmp = icmp slt <vscale x 2 x i64> %a, %splat
-  %res = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %a, <vscale x 2 x i64> %splat
+  %cmp = icmp slt <vscale x 2 x i64> %a, splat(i64 27)
+  %res = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %a, <vscale x 2 x i64> splat(i64 27)
   ret <vscale x 2 x i64> %res
 }
 
@@ -262,10 +222,8 @@ define <vscale x 2 x i64> @smin_i64_neg(<vscale x 2 x i64> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    smin z0.d, z0.d, #-58
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 -58, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %cmp = icmp slt <vscale x 2 x i64> %a, %splat
-  %res = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %a, <vscale x 2 x i64> %splat
+  %cmp = icmp slt <vscale x 2 x i64> %a, splat(i64 -58)
+  %res = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %a, <vscale x 2 x i64> splat(i64 -58)
   ret <vscale x 2 x i64> %res
 }
 
@@ -276,10 +234,8 @@ define <vscale x 2 x i64> @smin_i64_out_of_range(<vscale x 2 x i64> %a) {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    smin z0.d, p0/m, z0.d, z1.d
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 65535, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %cmp = icmp slt <vscale x 2 x i64> %a, %splat
-  %res = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %a, <vscale x 2 x i64> %splat
+  %cmp = icmp slt <vscale x 2 x i64> %a, splat(i64 65535)
+  %res = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %a, <vscale x 2 x i64> splat(i64 65535)
   ret <vscale x 2 x i64> %res
 }
 
@@ -291,10 +247,8 @@ define <vscale x 16 x i8> @umax_i8_pos(<vscale x 16 x i8> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    umax z0.b, z0.b, #27
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 16 x i8> undef, i8 27, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
-  %cmp = icmp ugt <vscale x 16 x i8> %a, %splat
-  %res = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %a, <vscale x 16 x i8> %splat
+  %cmp = icmp ugt <vscale x 16 x i8> %a, splat(i8 27)
+  %res = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %a, <vscale x 16 x i8> splat(i8 27)
   ret <vscale x 16 x i8> %res
 }
 
@@ -303,10 +257,8 @@ define <vscale x 16 x i8> @umax_i8_large(<vscale x 16 x i8> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    umax z0.b, z0.b, #129
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 16 x i8> undef, i8 129, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
-  %cmp = icmp ugt <vscale x 16 x i8> %a, %splat
-  %res = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %a, <vscale x 16 x i8> %splat
+  %cmp = icmp ugt <vscale x 16 x i8> %a, splat(i8 129)
+  %res = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %a, <vscale x 16 x i8> splat(i8 129)
   ret <vscale x 16 x i8> %res
 }
 
@@ -315,10 +267,8 @@ define <vscale x 8 x i16> @umax_i16_pos(<vscale x 8 x i16> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    umax z0.h, z0.h, #27
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 27, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %cmp = icmp ugt <vscale x 8 x i16> %a, %splat
-  %res = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %a, <vscale x 8 x i16> %splat
+  %cmp = icmp ugt <vscale x 8 x i16> %a, splat(i16 27)
+  %res = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %a, <vscale x 8 x i16> splat(i16 27)
   ret <vscale x 8 x i16> %res
 }
 
@@ -329,10 +279,8 @@ define <vscale x 8 x i16> @umax_i16_out_of_range(<vscale x 8 x i16> %a) {
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    umax z0.h, p0/m, z0.h, z1.h
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 257, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %cmp = icmp ugt <vscale x 8 x i16> %a, %splat
-  %res = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %a, <vscale x 8 x i16> %splat
+  %cmp = icmp ugt <vscale x 8 x i16> %a, splat(i16 257)
+  %res = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %a, <vscale x 8 x i16> splat(i16 257)
   ret <vscale x 8 x i16> %res
 }
 
@@ -341,10 +289,8 @@ define <vscale x 4 x i32> @umax_i32_pos(<vscale x 4 x i32> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    umax z0.s, z0.s, #27
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 27, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %cmp = icmp ugt <vscale x 4 x i32> %a, %splat
-  %res = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %a, <vscale x 4 x i32> %splat
+  %cmp = icmp ugt <vscale x 4 x i32> %a, splat(i32 27)
+  %res = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %a, <vscale x 4 x i32> splat(i32 27)
   ret <vscale x 4 x i32> %res
 }
 
@@ -356,10 +302,8 @@ define <vscale x 4 x i32> @umax_i32_out_of_range(<vscale x 4 x i32> %a) {
 ; CHECK-NEXT:    mov z1.s, w8
 ; CHECK-NEXT:    umax z0.s, p0/m, z0.s, z1.s
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 257, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %cmp = icmp ugt <vscale x 4 x i32> %a, %splat
-  %res = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %a, <vscale x 4 x i32> %splat
+  %cmp = icmp ugt <vscale x 4 x i32> %a, splat(i32 257)
+  %res = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %a, <vscale x 4 x i32> splat(i32 257)
   ret <vscale x 4 x i32> %res
 }
 
@@ -368,10 +312,8 @@ define <vscale x 2 x i64> @umax_i64_pos(<vscale x 2 x i64> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    umax z0.d, z0.d, #27
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 27, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %cmp = icmp ugt <vscale x 2 x i64> %a, %splat
-  %res = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %a, <vscale x 2 x i64> %splat
+  %cmp = icmp ugt <vscale x 2 x i64> %a, splat(i64 27)
+  %res = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %a, <vscale x 2 x i64> splat(i64 27)
   ret <vscale x 2 x i64> %res
 }
 
@@ -382,10 +324,8 @@ define <vscale x 2 x i64> @umax_i64_out_of_range(<vscale x 2 x i64> %a) {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    umax z0.d, p0/m, z0.d, z1.d
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 65535, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %cmp = icmp ugt <vscale x 2 x i64> %a, %splat
-  %res = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %a, <vscale x 2 x i64> %splat
+  %cmp = icmp ugt <vscale x 2 x i64> %a, splat(i64 65535)
+  %res = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %a, <vscale x 2 x i64> splat(i64 65535)
   ret <vscale x 2 x i64> %res
 }
 
@@ -397,10 +337,8 @@ define <vscale x 16 x i8> @umin_i8_pos(<vscale x 16 x i8> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    umin z0.b, z0.b, #27
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 16 x i8> undef, i8 27, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
-  %cmp = icmp ult <vscale x 16 x i8> %a, %splat
-  %res = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %a, <vscale x 16 x i8> %splat
+  %cmp = icmp ult <vscale x 16 x i8> %a, splat(i8 27)
+  %res = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %a, <vscale x 16 x i8> splat(i8 27)
   ret <vscale x 16 x i8> %res
 }
 
@@ -409,10 +347,8 @@ define <vscale x 16 x i8> @umin_i8_large(<vscale x 16 x i8> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    umin z0.b, z0.b, #129
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 16 x i8> undef, i8 129, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
-  %cmp = icmp ult <vscale x 16 x i8> %a, %splat
-  %res = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %a, <vscale x 16 x i8> %splat
+  %cmp = icmp ult <vscale x 16 x i8> %a, splat(i8 129)
+  %res = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %a, <vscale x 16 x i8> splat(i8 129)
   ret <vscale x 16 x i8> %res
 }
 
@@ -421,10 +357,8 @@ define <vscale x 8 x i16> @umin_i16_pos(<vscale x 8 x i16> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    umin z0.h, z0.h, #27
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 27, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %cmp = icmp ult <vscale x 8 x i16> %a, %splat
-  %res = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %a, <vscale x 8 x i16> %splat
+  %cmp = icmp ult <vscale x 8 x i16> %a, splat(i16 27)
+  %res = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %a, <vscale x 8 x i16> splat(i16 27)
   ret <vscale x 8 x i16> %res
 }
 
@@ -435,10 +369,8 @@ define <vscale x 8 x i16> @umin_i16_out_of_range(<vscale x 8 x i16> %a) {
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    umin z0.h, p0/m, z0.h, z1.h
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 257, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %cmp = icmp ult <vscale x 8 x i16> %a, %splat
-  %res = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %a, <vscale x 8 x i16> %splat
+  %cmp = icmp ult <vscale x 8 x i16> %a, splat(i16 257)
+  %res = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %a, <vscale x 8 x i16> splat(i16 257)
   ret <vscale x 8 x i16> %res
 }
 
@@ -447,10 +379,8 @@ define <vscale x 4 x i32> @umin_i32_pos(<vscale x 4 x i32> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    umin z0.s, z0.s, #27
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 27, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %cmp = icmp ult <vscale x 4 x i32> %a, %splat
-  %res = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %a, <vscale x 4 x i32> %splat
+  %cmp = icmp ult <vscale x 4 x i32> %a, splat(i32 27)
+  %res = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %a, <vscale x 4 x i32> splat(i32 27)
   ret <vscale x 4 x i32> %res
 }
 
@@ -462,10 +392,8 @@ define <vscale x 4 x i32> @umin_i32_out_of_range(<vscale x 4 x i32> %a) {
 ; CHECK-NEXT:    mov z1.s, w8
 ; CHECK-NEXT:    umin z0.s, p0/m, z0.s, z1.s
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 257, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %cmp = icmp ult <vscale x 4 x i32> %a, %splat
-  %res = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %a, <vscale x 4 x i32> %splat
+  %cmp = icmp ult <vscale x 4 x i32> %a, splat(i32 257)
+  %res = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %a, <vscale x 4 x i32> splat(i32 257)
   ret <vscale x 4 x i32> %res
 }
 
@@ -474,10 +402,8 @@ define <vscale x 2 x i64> @umin_i64_pos(<vscale x 2 x i64> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    umin z0.d, z0.d, #27
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 27, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %cmp = icmp ult <vscale x 2 x i64> %a, %splat
-  %res = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %a, <vscale x 2 x i64> %splat
+  %cmp = icmp ult <vscale x 2 x i64> %a, splat(i64 27)
+  %res = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %a, <vscale x 2 x i64> splat(i64 27)
   ret <vscale x 2 x i64> %res
 }
 
@@ -488,10 +414,8 @@ define <vscale x 2 x i64> @umin_i64_out_of_range(<vscale x 2 x i64> %a) {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    umin z0.d, p0/m, z0.d, z1.d
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 65535, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %cmp = icmp ult <vscale x 2 x i64> %a, %splat
-  %res = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %a, <vscale x 2 x i64> %splat
+  %cmp = icmp ult <vscale x 2 x i64> %a, splat(i64 65535)
+  %res = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %a, <vscale x 2 x i64> splat(i64 65535)
   ret <vscale x 2 x i64> %res
 }
 
@@ -503,9 +427,7 @@ define <vscale x 16 x i8> @mul_i8_neg(<vscale x 16 x i8> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mul z0.b, z0.b, #-17
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 16 x i8> undef, i8 -17, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
-  %res = mul <vscale x 16 x i8> %a, %splat
+  %res = mul <vscale x 16 x i8> %a, splat(i8 -17)
   ret <vscale x 16 x i8> %res
 }
 
@@ -514,9 +436,7 @@ define <vscale x 16 x i8> @mul_i8_pos(<vscale x 16 x i8> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mul z0.b, z0.b, #105
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 16 x i8> undef, i8 105, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
-  %res = mul <vscale x 16 x i8> %a, %splat
+  %res = mul <vscale x 16 x i8> %a, splat(i8 105)
   ret <vscale x 16 x i8> %res
 }
 
@@ -525,9 +445,7 @@ define <vscale x 8 x i16> @mul_i16_neg(<vscale x 8 x i16> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mul z0.h, z0.h, #-17
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 -17, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %res = mul <vscale x 8 x i16> %a, %splat
+  %res = mul <vscale x 8 x i16> %a, splat(i16 -17)
   ret <vscale x 8 x i16> %res
 }
 
@@ -536,9 +454,7 @@ define <vscale x 8 x i16> @mul_i16_pos(<vscale x 8 x i16> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mul z0.h, z0.h, #105
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 105, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %res = mul <vscale x 8 x i16> %a, %splat
+  %res = mul <vscale x 8 x i16> %a, splat(i16 105)
   ret <vscale x 8 x i16> %res
 }
 
@@ -547,9 +463,7 @@ define <vscale x 4 x i32> @mul_i32_neg(<vscale x 4 x i32> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mul z0.s, z0.s, #-17
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 -17, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %res = mul <vscale x 4 x i32> %a, %splat
+  %res = mul <vscale x 4 x i32> %a, splat(i32 -17)
   ret <vscale x 4 x i32> %res
 }
 
@@ -558,9 +472,7 @@ define <vscale x 4 x i32> @mul_i32_pos(<vscale x 4 x i32> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mul z0.s, z0.s, #105
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 105, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %res = mul <vscale x 4 x i32> %a, %splat
+  %res = mul <vscale x 4 x i32> %a, splat(i32 105)
   ret <vscale x 4 x i32> %res
 }
 
@@ -569,9 +481,7 @@ define <vscale x 2 x i64> @mul_i64_neg(<vscale x 2 x i64> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mul z0.d, z0.d, #-17
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 -17, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %res = mul <vscale x 2 x i64> %a, %splat
+  %res = mul <vscale x 2 x i64> %a, splat(i64 -17)
   ret <vscale x 2 x i64> %res
 }
 
@@ -580,9 +490,7 @@ define <vscale x 2 x i64> @mul_i64_pos(<vscale x 2 x i64> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mul z0.d, z0.d, #105
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 105, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %res = mul <vscale x 2 x i64> %a, %splat
+  %res = mul <vscale x 2 x i64> %a, splat(i64 105)
   ret <vscale x 2 x i64> %res
 }
 
@@ -593,9 +501,7 @@ define <vscale x 8 x i16> @mul_i16_range(<vscale x 8 x i16> %a) {
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    mul z0.h, p0/m, z0.h, z1.h
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 255, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %res = mul <vscale x 8 x i16> %a, %splat
+  %res = mul <vscale x 8 x i16> %a, splat(i16 255)
   ret <vscale x 8 x i16> %res
 }
 
@@ -606,9 +512,7 @@ define <vscale x 4 x i32> @mul_i32_range(<vscale x 4 x i32> %a) {
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    mul z0.s, p0/m, z0.s, z1.s
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 255, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %res = mul <vscale x 4 x i32> %a, %splat
+  %res = mul <vscale x 4 x i32> %a, splat(i32 255)
   ret <vscale x 4 x i32> %res
 }
 
@@ -619,9 +523,7 @@ define <vscale x 2 x i64> @mul_i64_range(<vscale x 2 x i64> %a) {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    mul z0.d, p0/m, z0.d, z1.d
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 255, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %res = mul <vscale x 2 x i64> %a, %splat
+  %res = mul <vscale x 2 x i64> %a, splat(i64 255)
   ret <vscale x 2 x i64> %res
 }
 
@@ -632,9 +534,7 @@ define <vscale x 16 x i8> @asr_i8(<vscale x 16 x i8> %a){
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    asr z0.b, z0.b, #7
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 16 x i8> undef, i8 7, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
-  %lshr = ashr <vscale x 16 x i8> %a, %splat
+  %lshr = ashr <vscale x 16 x i8> %a, splat(i8 7)
   ret <vscale x 16 x i8> %lshr
 }
 
@@ -643,9 +543,7 @@ define <vscale x 8 x i16> @asr_i16(<vscale x 8 x i16> %a){
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    asr z0.h, z0.h, #15
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 15, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %ashr = ashr <vscale x 8 x i16> %a, %splat
+  %ashr = ashr <vscale x 8 x i16> %a, splat(i16 15)
   ret <vscale x 8 x i16> %ashr
 }
 
@@ -654,9 +552,7 @@ define <vscale x 4 x i32> @asr_i32(<vscale x 4 x i32> %a){
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    asr z0.s, z0.s, #31
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 31, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %ashr = ashr <vscale x 4 x i32> %a, %splat
+  %ashr = ashr <vscale x 4 x i32> %a, splat(i32 31)
   ret <vscale x 4 x i32> %ashr
 }
 
@@ -665,9 +561,7 @@ define <vscale x 2 x i64> @asr_i64(<vscale x 2 x i64> %a){
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    asr z0.d, z0.d, #63
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 63, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %ashr = ashr <vscale x 2 x i64> %a, %splat
+  %ashr = ashr <vscale x 2 x i64> %a, splat(i64 63)
   ret <vscale x 2 x i64> %ashr
 }
 
@@ -678,9 +572,7 @@ define <vscale x 16 x i8> @lsl_i8(<vscale x 16 x i8> %a){
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsl z0.b, z0.b, #7
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 16 x i8> undef, i8 7, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
-  %shl = shl <vscale x 16 x i8> %a, %splat
+  %shl = shl <vscale x 16 x i8> %a, splat(i8 7)
   ret <vscale x 16 x i8> %shl
 }
 
@@ -689,9 +581,7 @@ define <vscale x 8 x i16> @lsl_i16(<vscale x 8 x i16> %a){
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsl z0.h, z0.h, #15
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 15, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %shl = shl <vscale x 8 x i16> %a, %splat
+  %shl = shl <vscale x 8 x i16> %a, splat(i16 15)
   ret <vscale x 8 x i16> %shl
 }
 
@@ -700,9 +590,7 @@ define <vscale x 4 x i32> @lsl_i32(<vscale x 4 x i32> %a){
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsl z0.s, z0.s, #31
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 31, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %shl = shl <vscale x 4 x i32> %a, %splat
+  %shl = shl <vscale x 4 x i32> %a, splat(i32 31)
   ret <vscale x 4 x i32> %shl
 }
 
@@ -711,9 +599,7 @@ define <vscale x 2 x i64> @lsl_i64(<vscale x 2 x i64> %a){
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsl z0.d, z0.d, #63
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 63, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %shl = shl <vscale x 2 x i64> %a, %splat
+  %shl = shl <vscale x 2 x i64> %a, splat(i64 63)
   ret <vscale x 2 x i64> %shl
 }
 
@@ -724,9 +610,7 @@ define <vscale x 16 x i8> @lsr_i8(<vscale x 16 x i8> %a){
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsr z0.b, z0.b, #7
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 16 x i8> undef, i8 7, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
-  %lshr = lshr <vscale x 16 x i8> %a, %splat
+  %lshr = lshr <vscale x 16 x i8> %a, splat(i8 7)
   ret <vscale x 16 x i8> %lshr
 }
 
@@ -735,9 +619,7 @@ define <vscale x 8 x i16> @lsr_i16(<vscale x 8 x i16> %a){
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsr z0.h, z0.h, #15
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 15, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %lshr = lshr <vscale x 8 x i16> %a, %splat
+  %lshr = lshr <vscale x 8 x i16> %a, splat(i16 15)
   ret <vscale x 8 x i16> %lshr
 }
 
@@ -746,9 +628,7 @@ define <vscale x 4 x i32> @lsr_i32(<vscale x 4 x i32> %a){
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsr z0.s, z0.s, #31
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 31, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %lshr = lshr <vscale x 4 x i32> %a, %splat
+  %lshr = lshr <vscale x 4 x i32> %a, splat(i32 31)
   ret <vscale x 4 x i32> %lshr
 }
 
@@ -757,9 +637,7 @@ define <vscale x 2 x i64> @lsr_i64(<vscale x 2 x i64> %a){
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsr z0.d, z0.d, #63
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 63, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %lshr = lshr <vscale x 2 x i64> %a, %splat
+  %lshr = lshr <vscale x 2 x i64> %a, splat(i64 63)
   ret <vscale x 2 x i64> %lshr
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/sve-int-imm.ll b/llvm/test/CodeGen/AArch64/sve-int-imm.ll
index d82e7f8c039cce..4be1abe8420081 100644
--- a/llvm/test/CodeGen/AArch64/sve-int-imm.ll
+++ b/llvm/test/CodeGen/AArch64/sve-int-imm.ll
@@ -9,9 +9,7 @@ define <vscale x 16 x i8> @add_i8_low(<vscale x 16 x i8> %a) {
 ; CHECK-LABEL: add_i8_low
 ; CHECK: add  z0.b, z0.b, #30
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 16 x i8> undef, i8 30, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
-  %res =  add <vscale x 16 x i8> %a, %splat
+  %res =  add <vscale x 16 x i8> %a, splat(i8 30)
   ret <vscale x 16 x i8> %res
 }
 
@@ -19,9 +17,7 @@ define <vscale x 8 x i16> @add_i16_low(<vscale x 8 x i16> %a) {
 ; CHECK-LABEL: add_i16_low
 ; CHECK: add  z0.h, z0.h, #30
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 30, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %res =  add <vscale x 8 x i16> %a, %splat
+  %res =  add <vscale x 8 x i16> %a, splat(i16 30)
   ret <vscale x 8 x i16> %res
 }
 
@@ -29,9 +25,7 @@ define <vscale x 8 x i16> @add_i16_high(<vscale x 8 x i16> %a) {
 ; CHECK-LABEL: add_i16_high
 ; CHECK: add  z0.h, z0.h, #1024
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 1024, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %res =  add <vscale x 8 x i16> %a, %splat
+  %res =  add <vscale x 8 x i16> %a, splat(i16 1024)
   ret <vscale x 8 x i16> %res
 }
 
@@ -39,9 +33,7 @@ define <vscale x 4 x i32> @add_i32_low(<vscale x 4 x i32> %a) {
 ; CHECK-LABEL: add_i32_low
 ; CHECK: add  z0.s, z0.s, #30
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 30, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %res = add <vscale x 4 x i32> %a, %splat
+  %res = add <vscale x 4 x i32> %a, splat(i32 30)
   ret <vscale x 4 x i32> %res
 }
 
@@ -49,9 +41,7 @@ define <vscale x 4 x i32> @add_i32_high(<vscale x 4 x i32> %a) {
 ; CHECK-LABEL: add_i32_high
 ; CHECK: add  z0.s, z0.s, #1024
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 1024, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %res =  add <vscale x 4 x i32> %a, %splat
+  %res =  add <vscale x 4 x i32> %a, splat(i32 1024)
   ret <vscale x 4 x i32> %res
 }
 
@@ -59,9 +49,7 @@ define <vscale x 2 x i64> @add_i64_low(<vscale x 2 x i64> %a) {
 ; CHECK-LABEL: add_i64_low
 ; CHECK: add  z0.d, z0.d, #30
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 30, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %res =  add <vscale x 2 x i64> %a, %splat
+  %res =  add <vscale x 2 x i64> %a, splat(i64 30)
   ret <vscale x 2 x i64> %res
 }
 
@@ -69,9 +57,7 @@ define <vscale x 2 x i64> @add_i64_high(<vscale x 2 x i64> %a) {
 ; CHECK-LABEL: add_i64_high
 ; CHECK: add  z0.d, z0.d, #1024
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 1024, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %res = add <vscale x 2 x i64> %a, %splat
+  %res = add <vscale x 2 x i64> %a, splat(i64 1024)
   ret <vscale x 2 x i64> %res
 }
 
@@ -79,9 +65,7 @@ define <vscale x 16 x i8> @add_i8_signedness(<vscale x 16 x i8> %a) {
 ; CHECK-LABEL: add_i8_signedness
 ; CHECK: add  z0.b, z0.b, #255
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 16 x i8> undef, i8 255, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
-  %res =  add <vscale x 16 x i8> %a, %splat
+  %res =  add <vscale x 16 x i8> %a, splat(i8 255)
   ret <vscale x 16 x i8> %res
 }
 
@@ -89,9 +73,7 @@ define <vscale x 8 x i16> @add_i16_signedness(<vscale x 8 x i16> %a) {
 ; CHECK-LABEL: add_i16_signedness
 ; CHECK: add  z0.h, z0.h, #65280
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 65280, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %res =  add <vscale x 8 x i16> %a, %splat
+  %res =  add <vscale x 8 x i16> %a, splat(i16 65280)
   ret <vscale x 8 x i16> %res
 }
 
@@ -100,9 +82,7 @@ define <vscale x 16 x i8> @subr_i8_low(<vscale x 16 x i8> %a) {
 ; CHECK-LABEL: subr_i8_low
 ; CHECK: subr  z0.b, z0.b, #30
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 16 x i8> undef, i8 30, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
-  %res =  sub <vscale x 16 x i8> %splat, %a
+  %res =  sub <vscale x 16 x i8> splat(i8 30), %a
   ret <vscale x 16 x i8> %res
 }
 
@@ -110,9 +90,7 @@ define <vscale x 8 x i16> @subr_i16_low(<vscale x 8 x i16> %a) {
 ; CHECK-LABEL: subr_i16_low
 ; CHECK: subr  z0.h, z0.h, #30
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 30, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %res =  sub <vscale x 8 x i16> %splat, %a
+  %res =  sub <vscale x 8 x i16> splat(i16 30), %a
   ret <vscale x 8 x i16> %res
 }
 
@@ -120,9 +98,7 @@ define <vscale x 8 x i16> @subr_i16_high(<vscale x 8 x i16> %a) {
 ; CHECK-LABEL: subr_i16_high
 ; CHECK: subr  z0.h, z0.h, #1024
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 1024, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %res =  sub <vscale x 8 x i16> %splat, %a
+  %res =  sub <vscale x 8 x i16> splat(i16 1024), %a
   ret <vscale x 8 x i16> %res
 }
 
@@ -130,9 +106,7 @@ define <vscale x 4 x i32> @subr_i32_low(<vscale x 4 x i32> %a) {
 ; CHECK-LABEL: subr_i32_low
 ; CHECK: subr  z0.s, z0.s, #30
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 30, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %res =  sub <vscale x 4 x i32> %splat, %a
+  %res =  sub <vscale x 4 x i32> splat(i32 30), %a
   ret <vscale x 4 x i32> %res
 }
 
@@ -140,9 +114,7 @@ define <vscale x 4 x i32> @subr_i32_high(<vscale x 4 x i32> %a) {
 ; CHECK-LABEL: subr_i32_high
 ; CHECK: subr  z0.s, z0.s, #1024
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 1024, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %res =  sub <vscale x 4 x i32> %splat, %a
+  %res =  sub <vscale x 4 x i32> splat(i32 1024), %a
   ret <vscale x 4 x i32> %res
 }
 
@@ -150,9 +122,7 @@ define <vscale x 2 x i64> @subr_i64_low(<vscale x 2 x i64> %a) {
 ; CHECK-LABEL: subr_i64_low
 ; CHECK: subr  z0.d, z0.d, #30
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 30, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %res =  sub <vscale x 2 x i64> %splat, %a
+  %res =  sub <vscale x 2 x i64> splat(i64 30), %a
   ret <vscale x 2 x i64> %res
 }
 
@@ -160,9 +130,7 @@ define <vscale x 2 x i64> @subr_i64_high(<vscale x 2 x i64> %a) {
 ; CHECK-LABEL: subr_i64_high
 ; CHECK: subr  z0.d, z0.d, #1024
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 1024, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %res =  sub <vscale x 2 x i64> %splat, %a
+  %res =  sub <vscale x 2 x i64> splat(i64 1024), %a
   ret <vscale x 2 x i64> %res
 }
 
@@ -171,9 +139,7 @@ define <vscale x 16 x i8> @sub_i8_low(<vscale x 16 x i8> %a) {
 ; CHECK-LABEL: sub_i8_low
 ; CHECK: sub  z0.b, z0.b, #30
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 16 x i8> undef, i8 30, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
-  %res =  sub <vscale x 16 x i8> %a, %splat
+  %res =  sub <vscale x 16 x i8> %a, splat(i8 30)
   ret <vscale x 16 x i8> %res
 }
 
@@ -181,9 +147,7 @@ define <vscale x 8 x i16> @sub_i16_low(<vscale x 8 x i16> %a) {
 ; CHECK-LABEL: sub_i16_low
 ; CHECK: sub  z0.h, z0.h, #30
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 30, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %res =  sub <vscale x 8 x i16> %a, %splat
+  %res =  sub <vscale x 8 x i16> %a, splat(i16 30)
   ret <vscale x 8 x i16> %res
 }
 
@@ -191,9 +155,7 @@ define <vscale x 8 x i16> @sub_i16_high(<vscale x 8 x i16> %a) {
 ; CHECK-LABEL: sub_i16_high
 ; CHECK: sub  z0.h, z0.h, #1024
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 1024, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %res =  sub <vscale x 8 x i16> %a, %splat
+  %res =  sub <vscale x 8 x i16> %a, splat(i16 1024)
   ret <vscale x 8 x i16> %res
 }
 
@@ -201,9 +163,7 @@ define <vscale x 4 x i32> @sub_i32_low(<vscale x 4 x i32> %a) {
 ; CHECK-LABEL: sub_i32_low
 ; CHECK: sub  z0.s, z0.s, #30
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 30, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %res = sub <vscale x 4 x i32> %a, %splat
+  %res = sub <vscale x 4 x i32> %a, splat(i32 30)
   ret <vscale x 4 x i32> %res
 }
 
@@ -211,9 +171,7 @@ define <vscale x 4 x i32> @sub_i32_high(<vscale x 4 x i32> %a) {
 ; CHECK-LABEL: sub_i32_high
 ; CHECK: sub  z0.s, z0.s, #1024
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 1024, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %res =  sub <vscale x 4 x i32> %a, %splat
+  %res =  sub <vscale x 4 x i32> %a, splat(i32 1024)
   ret <vscale x 4 x i32> %res
 }
 
@@ -221,9 +179,7 @@ define <vscale x 2 x i64> @sub_i64_low(<vscale x 2 x i64> %a) {
 ; CHECK-LABEL: sub_i64_low
 ; CHECK: sub  z0.d, z0.d, #30
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 30, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %res =  sub <vscale x 2 x i64> %a, %splat
+  %res =  sub <vscale x 2 x i64> %a, splat(i64 30)
   ret <vscale x 2 x i64> %res
 }
 
@@ -231,9 +187,7 @@ define <vscale x 2 x i64> @sub_i64_high(<vscale x 2 x i64> %a) {
 ; CHECK-LABEL: sub_i64_high
 ; CHECK: sub  z0.d, z0.d, #1024
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 1024, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %res = sub <vscale x 2 x i64> %a, %splat
+  %res = sub <vscale x 2 x i64> %a, splat(i64 1024)
   ret <vscale x 2 x i64> %res
 }
 
@@ -242,9 +196,7 @@ define <vscale x 16 x i8> @sqadd_i8_low(<vscale x 16 x i8> %a) {
 ; CHECK-LABEL: sqadd_i8_low
 ; CHECK: sqadd  z0.b, z0.b, #30
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 16 x i8> undef, i8 30, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
-  %res =  call <vscale x 16 x i8> @llvm.sadd.sat.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %splat)
+  %res =  call <vscale x 16 x i8> @llvm.sadd.sat.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> splat(i8 30))
   ret <vscale x 16 x i8> %res
 }
 
@@ -252,9 +204,7 @@ define <vscale x 8 x i16> @sqadd_i16_low(<vscale x 8 x i16> %a) {
 ; CHECK-LABEL: sqadd_i16_low
 ; CHECK: sqadd  z0.h, z0.h, #30
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 30, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %res =  call <vscale x 8 x i16> @llvm.sadd.sat.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
+  %res =  call <vscale x 8 x i16> @llvm.sadd.sat.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> splat(i16 30))
   ret <vscale x 8 x i16> %res
 }
 
@@ -262,9 +212,7 @@ define <vscale x 8 x i16> @sqadd_i16_high(<vscale x 8 x i16> %a) {
 ; CHECK-LABEL: sqadd_i16_high
 ; CHECK: sqadd  z0.h, z0.h, #1024
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 1024, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %res =  call <vscale x 8 x i16> @llvm.sadd.sat.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
+  %res =  call <vscale x 8 x i16> @llvm.sadd.sat.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> splat(i16 1024))
   ret <vscale x 8 x i16> %res
 }
 
@@ -272,9 +220,7 @@ define <vscale x 4 x i32> @sqadd_i32_low(<vscale x 4 x i32> %a) {
 ; CHECK-LABEL: sqadd_i32_low
 ; CHECK: sqadd  z0.s, z0.s, #30
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 30, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %res =  call <vscale x 4 x i32> @llvm.sadd.sat.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
+  %res =  call <vscale x 4 x i32> @llvm.sadd.sat.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> splat(i32 30))
   ret <vscale x 4 x i32> %res
 }
 
@@ -282,9 +228,7 @@ define <vscale x 4 x i32> @sqadd_i32_high(<vscale x 4 x i32> %a) {
 ; CHECK-LABEL: sqadd_i32_high
 ; CHECK: sqadd  z0.s, z0.s, #1024
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 1024, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %res =  call <vscale x 4 x i32> @llvm.sadd.sat.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
+  %res =  call <vscale x 4 x i32> @llvm.sadd.sat.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> splat(i32 1024))
   ret <vscale x 4 x i32> %res
 }
 
@@ -292,9 +236,7 @@ define <vscale x 2 x i64> @sqadd_i64_low(<vscale x 2 x i64> %a) {
 ; CHECK-LABEL: sqadd_i64_low
 ; CHECK: sqadd  z0.d, z0.d, #30
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 30, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %res =  call <vscale x 2 x i64> @llvm.sadd.sat.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
+  %res =  call <vscale x 2 x i64> @llvm.sadd.sat.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> splat(i64 30))
   ret <vscale x 2 x i64> %res
 }
 
@@ -302,9 +244,7 @@ define <vscale x 2 x i64> @sqadd_i64_high(<vscale x 2 x i64> %a) {
 ; CHECK-LABEL: sqadd_i64_high
 ; CHECK: sqadd  z0.d, z0.d, #1024
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 1024, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %res =  call <vscale x 2 x i64> @llvm.sadd.sat.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
+  %res =  call <vscale x 2 x i64> @llvm.sadd.sat.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> splat(i64 1024))
   ret <vscale x 2 x i64> %res
 }
 
@@ -313,9 +253,7 @@ define <vscale x 16 x i8> @uqadd_i8_low(<vscale x 16 x i8> %a) {
 ; CHECK-LABEL: uqadd_i8_low
 ; CHECK: uqadd  z0.b, z0.b, #30
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 16 x i8> undef, i8 30, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
-  %res =  call <vscale x 16 x i8> @llvm.uadd.sat.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %splat)
+  %res =  call <vscale x 16 x i8> @llvm.uadd.sat.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> splat(i8 30))
   ret <vscale x 16 x i8> %res
 }
 
@@ -323,9 +261,7 @@ define <vscale x 8 x i16> @uqadd_i16_low(<vscale x 8 x i16> %a) {
 ; CHECK-LABEL: uqadd_i16_low
 ; CHECK: uqadd  z0.h, z0.h, #30
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 30, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %res =  call <vscale x 8 x i16> @llvm.uadd.sat.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
+  %res =  call <vscale x 8 x i16> @llvm.uadd.sat.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> splat(i16 30))
   ret <vscale x 8 x i16> %res
 }
 
@@ -333,9 +269,7 @@ define <vscale x 8 x i16> @uqadd_i16_high(<vscale x 8 x i16> %a) {
 ; CHECK-LABEL: uqadd_i16_high
 ; CHECK: uqadd  z0.h, z0.h, #1024
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 1024, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %res =  call <vscale x 8 x i16> @llvm.uadd.sat.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
+  %res =  call <vscale x 8 x i16> @llvm.uadd.sat.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> splat(i16 1024))
   ret <vscale x 8 x i16> %res
 }
 
@@ -343,9 +277,7 @@ define <vscale x 4 x i32> @uqadd_i32_low(<vscale x 4 x i32> %a) {
 ; CHECK-LABEL: uqadd_i32_low
 ; CHECK: uqadd  z0.s, z0.s, #30
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 30, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %res =  call <vscale x 4 x i32> @llvm.uadd.sat.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
+  %res =  call <vscale x 4 x i32> @llvm.uadd.sat.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> splat(i32 30))
   ret <vscale x 4 x i32> %res
 }
 
@@ -353,9 +285,7 @@ define <vscale x 4 x i32> @uqadd_i32_high(<vscale x 4 x i32> %a) {
 ; CHECK-LABEL: uqadd_i32_high
 ; CHECK: uqadd  z0.s, z0.s, #1024
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 1024, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %res =  call <vscale x 4 x i32> @llvm.uadd.sat.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
+  %res =  call <vscale x 4 x i32> @llvm.uadd.sat.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> splat(i32 1024))
   ret <vscale x 4 x i32> %res
 }
 
@@ -363,9 +293,7 @@ define <vscale x 2 x i64> @uqadd_i64_low(<vscale x 2 x i64> %a) {
 ; CHECK-LABEL: uqadd_i64_low
 ; CHECK: uqadd  z0.d, z0.d, #30
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 30, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %res =  call <vscale x 2 x i64> @llvm.uadd.sat.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
+  %res =  call <vscale x 2 x i64> @llvm.uadd.sat.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> splat(i64 30))
   ret <vscale x 2 x i64> %res
 }
 
@@ -373,9 +301,7 @@ define <vscale x 2 x i64> @uqadd_i64_high(<vscale x 2 x i64> %a) {
 ; CHECK-LABEL: uqadd_i64_high
 ; CHECK: uqadd  z0.d, z0.d, #1024
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 1024, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %res =  call <vscale x 2 x i64> @llvm.uadd.sat.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
+  %res =  call <vscale x 2 x i64> @llvm.uadd.sat.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> splat(i64 1024))
   ret <vscale x 2 x i64> %res
 }
 
@@ -384,9 +310,7 @@ define <vscale x 16 x i8> @sqsub_i8_low(<vscale x 16 x i8> %a) {
 ; CHECK-LABEL: sqsub_i8_low
 ; CHECK: sqsub  z0.b, z0.b, #30
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 16 x i8> undef, i8 30, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
-  %res =  call <vscale x 16 x i8> @llvm.ssub.sat.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %splat)
+  %res =  call <vscale x 16 x i8> @llvm.ssub.sat.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> splat(i8 30))
   ret <vscale x 16 x i8> %res
 }
 
@@ -394,9 +318,7 @@ define <vscale x 8 x i16> @sqsub_i16_low(<vscale x 8 x i16> %a) {
 ; CHECK-LABEL: sqsub_i16_low
 ; CHECK: sqsub  z0.h, z0.h, #30
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 30, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %res =  call <vscale x 8 x i16> @llvm.ssub.sat.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
+  %res =  call <vscale x 8 x i16> @llvm.ssub.sat.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> splat(i16 30))
   ret <vscale x 8 x i16> %res
 }
 
@@ -404,9 +326,7 @@ define <vscale x 8 x i16> @sqsub_i16_high(<vscale x 8 x i16> %a) {
 ; CHECK-LABEL: sqsub_i16_high
 ; CHECK: sqsub  z0.h, z0.h, #1024
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 1024, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %res =  call <vscale x 8 x i16> @llvm.ssub.sat.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
+  %res =  call <vscale x 8 x i16> @llvm.ssub.sat.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> splat(i16 1024))
   ret <vscale x 8 x i16> %res
 }
 
@@ -414,9 +334,7 @@ define <vscale x 4 x i32> @sqsub_i32_low(<vscale x 4 x i32> %a) {
 ; CHECK-LABEL: sqsub_i32_low
 ; CHECK: sqsub  z0.s, z0.s, #30
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 30, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %res =  call <vscale x 4 x i32> @llvm.ssub.sat.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
+  %res =  call <vscale x 4 x i32> @llvm.ssub.sat.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> splat(i32 30))
   ret <vscale x 4 x i32> %res
 }
 
@@ -424,9 +342,7 @@ define <vscale x 4 x i32> @sqsub_i32_high(<vscale x 4 x i32> %a) {
 ; CHECK-LABEL: sqsub_i32_high
 ; CHECK: sqsub  z0.s, z0.s, #1024
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 1024, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %res =  call <vscale x 4 x i32> @llvm.ssub.sat.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
+  %res =  call <vscale x 4 x i32> @llvm.ssub.sat.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> splat(i32 1024))
   ret <vscale x 4 x i32> %res
 }
 
@@ -434,9 +350,7 @@ define <vscale x 2 x i64> @sqsub_i64_low(<vscale x 2 x i64> %a) {
 ; CHECK-LABEL: sqsub_i64_low
 ; CHECK: sqsub  z0.d, z0.d, #30
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 30, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %res =  call <vscale x 2 x i64> @llvm.ssub.sat.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
+  %res =  call <vscale x 2 x i64> @llvm.ssub.sat.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> splat(i64 30))
   ret <vscale x 2 x i64> %res
 }
 
@@ -444,9 +358,7 @@ define <vscale x 2 x i64> @sqsub_i64_high(<vscale x 2 x i64> %a) {
 ; CHECK-LABEL: sqsub_i64_high
 ; CHECK: sqsub  z0.d, z0.d, #1024
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 1024, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %res =  call <vscale x 2 x i64> @llvm.ssub.sat.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
+  %res =  call <vscale x 2 x i64> @llvm.ssub.sat.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> splat(i64 1024))
   ret <vscale x 2 x i64> %res
 }
 
@@ -455,9 +367,7 @@ define <vscale x 16 x i8> @uqsub_i8_low(<vscale x 16 x i8> %a) {
 ; CHECK-LABEL: uqsub_i8_low
 ; CHECK: uqsub  z0.b, z0.b, #30
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 16 x i8> undef, i8 30, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
-  %res =  call <vscale x 16 x i8> @llvm.usub.sat.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %splat)
+  %res =  call <vscale x 16 x i8> @llvm.usub.sat.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> splat(i8 30))
   ret <vscale x 16 x i8> %res
 }
 
@@ -465,9 +375,7 @@ define <vscale x 8 x i16> @uqsub_i16_low(<vscale x 8 x i16> %a) {
 ; CHECK-LABEL: uqsub_i16_low
 ; CHECK: uqsub  z0.h, z0.h, #30
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 30, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %res =  call <vscale x 8 x i16> @llvm.usub.sat.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
+  %res =  call <vscale x 8 x i16> @llvm.usub.sat.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> splat(i16 30))
   ret <vscale x 8 x i16> %res
 }
 
@@ -475,9 +383,7 @@ define <vscale x 8 x i16> @uqsub_i16_high(<vscale x 8 x i16> %a) {
 ; CHECK-LABEL: uqsub_i16_high
 ; CHECK: uqsub  z0.h, z0.h, #1024
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 1024, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %res =  call <vscale x 8 x i16> @llvm.usub.sat.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
+  %res =  call <vscale x 8 x i16> @llvm.usub.sat.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> splat(i16 1024))
   ret <vscale x 8 x i16> %res
 }
 
@@ -485,9 +391,7 @@ define <vscale x 4 x i32> @uqsub_i32_low(<vscale x 4 x i32> %a) {
 ; CHECK-LABEL: uqsub_i32_low
 ; CHECK: uqsub  z0.s, z0.s, #30
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 30, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %res =  call <vscale x 4 x i32> @llvm.usub.sat.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
+  %res =  call <vscale x 4 x i32> @llvm.usub.sat.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> splat(i32 30))
   ret <vscale x 4 x i32> %res
 }
 
@@ -495,9 +399,7 @@ define <vscale x 4 x i32> @uqsub_i32_high(<vscale x 4 x i32> %a) {
 ; CHECK-LABEL: uqsub_i32_high
 ; CHECK: uqsub  z0.s, z0.s, #1024
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 1024, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %res =  call <vscale x 4 x i32> @llvm.usub.sat.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
+  %res =  call <vscale x 4 x i32> @llvm.usub.sat.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> splat(i32 1024))
   ret <vscale x 4 x i32> %res
 }
 
@@ -505,9 +407,7 @@ define <vscale x 2 x i64> @uqsub_i64_low(<vscale x 2 x i64> %a) {
 ; CHECK-LABEL: uqsub_i64_low
 ; CHECK: uqsub  z0.d, z0.d, #30
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 30, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %res =  call <vscale x 2 x i64> @llvm.usub.sat.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
+  %res =  call <vscale x 2 x i64> @llvm.usub.sat.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> splat(i64 30))
   ret <vscale x 2 x i64> %res
 }
 
@@ -515,9 +415,7 @@ define <vscale x 2 x i64> @uqsub_i64_high(<vscale x 2 x i64> %a) {
 ; CHECK-LABEL: uqsub_i64_high
 ; CHECK: uqsub  z0.d, z0.d, #1024
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 1024, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %res =  call <vscale x 2 x i64> @llvm.usub.sat.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
+  %res =  call <vscale x 2 x i64> @llvm.usub.sat.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> splat(i64 1024))
   ret <vscale x 2 x i64> %res
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/sve-int-log-imm.ll b/llvm/test/CodeGen/AArch64/sve-int-log-imm.ll
index 52b56d5adb5e2e..f67029ed0c09f4 100644
--- a/llvm/test/CodeGen/AArch64/sve-int-log-imm.ll
+++ b/llvm/test/CodeGen/AArch64/sve-int-log-imm.ll
@@ -9,9 +9,7 @@ define <vscale x 16 x i8> @orr_i8(<vscale x 16 x i8> %a) {
 ; CHECK-LABEL: orr_i8:
 ; CHECK: orr z0.b, z0.b, #0xf
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 16 x i8> undef, i8 15, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
-  %res = or <vscale x 16 x i8> %a, %splat
+  %res = or <vscale x 16 x i8> %a, splat(i8 15)
   ret <vscale x 16 x i8> %res
 }
 
@@ -19,9 +17,7 @@ define <vscale x 8 x i16> @orr_i16(<vscale x 8 x i16> %a) {
 ; CHECK-LABEL: orr_i16:
 ; CHECK: orr z0.h, z0.h, #0xfc07
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 64519, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %res = or <vscale x 8 x i16> %a, %splat
+  %res = or <vscale x 8 x i16> %a, splat(i16 64519)
   ret <vscale x 8 x i16> %res
 }
 
@@ -29,9 +25,7 @@ define <vscale x 4 x i32> @orr_i32(<vscale x 4 x i32> %a) {
 ; CHECK-LABEL: orr_i32:
 ; CHECK: orr z0.s, z0.s, #0xffff00
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 16776960, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %res = or <vscale x 4 x i32> %a, %splat
+  %res = or <vscale x 4 x i32> %a, splat(i32 16776960)
   ret <vscale x 4 x i32> %res
 }
 
@@ -39,9 +33,7 @@ define <vscale x 2 x i64> @orr_i64(<vscale x 2 x i64> %a) {
 ; CHECK-LABEL: orr_i64:
 ; CHECK: orr z0.d, z0.d, #0xfffc000000000000
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 18445618173802708992, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %res = or <vscale x 2 x i64> %a, %splat
+  %res = or <vscale x 2 x i64> %a, splat(i64 18445618173802708992)
   ret <vscale x 2 x i64> %res
 }
 
@@ -50,9 +42,7 @@ define <vscale x 16 x i8> @eor_i8(<vscale x 16 x i8> %a) {
 ; CHECK-LABEL: eor_i8:
 ; CHECK: eor z0.b, z0.b, #0xf
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 16 x i8> undef, i8 15, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
-  %res = xor <vscale x 16 x i8> %a, %splat
+  %res = xor <vscale x 16 x i8> %a, splat(i8 15)
   ret <vscale x 16 x i8> %res
 }
 
@@ -60,9 +50,7 @@ define <vscale x 8 x i16> @eor_i16(<vscale x 8 x i16> %a) {
 ; CHECK-LABEL: eor_i16:
 ; CHECK: eor z0.h, z0.h, #0xfc07
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 64519, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %res = xor <vscale x 8 x i16> %a, %splat
+  %res = xor <vscale x 8 x i16> %a, splat(i16 64519)
   ret <vscale x 8 x i16> %res
 }
 
@@ -70,9 +58,7 @@ define <vscale x 4 x i32> @eor_i32(<vscale x 4 x i32> %a) {
 ; CHECK-LABEL: eor_i32:
 ; CHECK: eor z0.s, z0.s, #0xffff00
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 16776960, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %res = xor <vscale x 4 x i32> %a, %splat
+  %res = xor <vscale x 4 x i32> %a, splat(i32 16776960)
   ret <vscale x 4 x i32> %res
 }
 
@@ -80,9 +66,7 @@ define <vscale x 2 x i64> @eor_i64(<vscale x 2 x i64> %a) {
 ; CHECK-LABEL: eor_i64:
 ; CHECK: eor z0.d, z0.d, #0xfffc000000000000
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 18445618173802708992, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %res = xor <vscale x 2 x i64> %a, %splat
+  %res = xor <vscale x 2 x i64> %a, splat(i64 18445618173802708992)
   ret <vscale x 2 x i64> %res
 }
 
@@ -91,9 +75,7 @@ define <vscale x 16 x i8> @and_i8(<vscale x 16 x i8> %a) {
 ; CHECK-LABEL: and_i8:
 ; CHECK: and z0.b, z0.b, #0xf
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 16 x i8> undef, i8 15, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
-  %res = and <vscale x 16 x i8> %a, %splat
+  %res = and <vscale x 16 x i8> %a, splat(i8 15)
   ret <vscale x 16 x i8> %res
 }
 
@@ -101,9 +83,7 @@ define <vscale x 8 x i16> @and_i16(<vscale x 8 x i16> %a) {
 ; CHECK-LABEL: and_i16:
 ; CHECK: and z0.h, z0.h, #0xfc07
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 64519, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %res = and <vscale x 8 x i16> %a, %splat
+  %res = and <vscale x 8 x i16> %a, splat(i16 64519)
   ret <vscale x 8 x i16> %res
 }
 
@@ -111,9 +91,7 @@ define <vscale x 4 x i32> @and_i32(<vscale x 4 x i32> %a) {
 ; CHECK-LABEL: and_i32:
 ; CHECK: and z0.s, z0.s, #0xffff00
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 16776960, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %res = and <vscale x 4 x i32> %a, %splat
+  %res = and <vscale x 4 x i32> %a, splat(i32 16776960)
   ret <vscale x 4 x i32> %res
 }
 
@@ -121,8 +99,6 @@ define <vscale x 2 x i64> @and_i64(<vscale x 2 x i64> %a) {
 ; CHECK-LABEL: and_i64:
 ; CHECK: and z0.d, z0.d, #0xfffc000000000000
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 18445618173802708992, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %res = and <vscale x 2 x i64> %a, %splat
+  %res = and <vscale x 2 x i64> %a, splat(i64 18445618173802708992)
   ret <vscale x 2 x i64> %res
 }

diff  --git a/llvm/test/CodeGen/AArch64/sve-int-mulh-pred.ll b/llvm/test/CodeGen/AArch64/sve-int-mulh-pred.ll
index c636f1172711a7..32760caa524ec5 100644
--- a/llvm/test/CodeGen/AArch64/sve-int-mulh-pred.ll
+++ b/llvm/test/CodeGen/AArch64/sve-int-mulh-pred.ll
@@ -11,12 +11,10 @@ define <vscale x 16 x i8> @smulh_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b
 ; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    smulh z0.b, p0/m, z0.b, z1.b
 ; CHECK-NEXT:    ret
-  %insert = insertelement <vscale x 16 x i16> undef, i16 8, i64 0
-  %splat = shufflevector <vscale x 16 x i16> %insert, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
   %1 = sext <vscale x 16 x i8> %a to <vscale x 16 x i16>
   %2 = sext <vscale x 16 x i8> %b to <vscale x 16 x i16>
   %mul = mul <vscale x 16 x i16> %1, %2
-  %shr = lshr <vscale x 16 x i16> %mul, %splat
+  %shr = lshr <vscale x 16 x i16> %mul, splat(i16 8)
   %tr = trunc <vscale x 16 x i16> %shr to <vscale x 16 x i8>
   ret <vscale x 16 x i8> %tr
 }
@@ -27,12 +25,10 @@ define <vscale x 8 x i16> @smulh_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    smulh z0.h, p0/m, z0.h, z1.h
 ; CHECK-NEXT:    ret
-  %insert = insertelement <vscale x 8 x i32> undef, i32 16, i64 0
-  %splat = shufflevector <vscale x 8 x i32> %insert, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
   %1 = sext <vscale x 8 x i16> %a to <vscale x 8 x i32>
   %2 = sext <vscale x 8 x i16> %b to <vscale x 8 x i32>
   %mul = mul <vscale x 8 x i32> %1, %2
-  %shr = lshr <vscale x 8 x i32> %mul, %splat
+  %shr = lshr <vscale x 8 x i32> %mul, splat(i32 16)
   %tr = trunc <vscale x 8 x i32> %shr to <vscale x 8 x i16>
   ret <vscale x 8 x i16> %tr
 }
@@ -43,12 +39,10 @@ define <vscale x 4 x i32> @smulh_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    smulh z0.s, p0/m, z0.s, z1.s
 ; CHECK-NEXT:    ret
-  %insert = insertelement <vscale x 4 x i64> undef, i64 32, i64 0
-  %splat = shufflevector <vscale x 4 x i64> %insert, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
   %1 = sext <vscale x 4 x i32> %a to <vscale x 4 x i64>
   %2 = sext <vscale x 4 x i32> %b to <vscale x 4 x i64>
   %mul = mul <vscale x 4 x i64> %1, %2
-  %shr = lshr <vscale x 4 x i64> %mul, %splat
+  %shr = lshr <vscale x 4 x i64> %mul, splat(i64 32)
   %tr = trunc <vscale x 4 x i64> %shr to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %tr
 }
@@ -59,12 +53,10 @@ define <vscale x 2 x i64> @smulh_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    smulh z0.d, p0/m, z0.d, z1.d
 ; CHECK-NEXT:    ret
-  %insert = insertelement <vscale x 2 x i128> undef, i128 64, i64 0
-  %splat = shufflevector <vscale x 2 x i128> %insert, <vscale x 2 x i128> undef, <vscale x 2 x i32> zeroinitializer
   %1 = sext <vscale x 2 x i64> %a to <vscale x 2 x i128>
   %2 = sext <vscale x 2 x i64> %b to <vscale x 2 x i128>
   %mul = mul <vscale x 2 x i128> %1, %2
-  %shr = lshr <vscale x 2 x i128> %mul, %splat
+  %shr = lshr <vscale x 2 x i128> %mul, splat(i128 64)
   %tr = trunc <vscale x 2 x i128> %shr to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %tr
 }
@@ -79,12 +71,10 @@ define <vscale x 16 x i8> @umulh_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b
 ; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    umulh z0.b, p0/m, z0.b, z1.b
 ; CHECK-NEXT:    ret
-  %insert = insertelement <vscale x 16 x i16> undef, i16 8, i64 0
-  %splat = shufflevector <vscale x 16 x i16> %insert, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
   %1 = zext <vscale x 16 x i8> %a to <vscale x 16 x i16>
   %2 = zext <vscale x 16 x i8> %b to <vscale x 16 x i16>
   %mul = mul <vscale x 16 x i16> %1, %2
-  %shr = lshr <vscale x 16 x i16> %mul, %splat
+  %shr = lshr <vscale x 16 x i16> %mul, splat(i16 8)
   %tr = trunc <vscale x 16 x i16> %shr to <vscale x 16 x i8>
   ret <vscale x 16 x i8> %tr
 }
@@ -95,12 +85,10 @@ define <vscale x 8 x i16> @umulh_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    umulh z0.h, p0/m, z0.h, z1.h
 ; CHECK-NEXT:    ret
-  %insert = insertelement <vscale x 8 x i32> undef, i32 16, i64 0
-  %splat = shufflevector <vscale x 8 x i32> %insert, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
   %1 = zext <vscale x 8 x i16> %a to <vscale x 8 x i32>
   %2 = zext <vscale x 8 x i16> %b to <vscale x 8 x i32>
   %mul = mul <vscale x 8 x i32> %1, %2
-  %shr = lshr <vscale x 8 x i32> %mul, %splat
+  %shr = lshr <vscale x 8 x i32> %mul, splat(i32 16)
   %tr = trunc <vscale x 8 x i32> %shr to <vscale x 8 x i16>
   ret <vscale x 8 x i16> %tr
 }
@@ -111,12 +99,10 @@ define <vscale x 4 x i32> @umulh_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    umulh z0.s, p0/m, z0.s, z1.s
 ; CHECK-NEXT:    ret
-  %insert = insertelement <vscale x 4 x i64> undef, i64 32, i64 0
-  %splat = shufflevector <vscale x 4 x i64> %insert, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
   %1 = zext <vscale x 4 x i32> %a to <vscale x 4 x i64>
   %2 = zext <vscale x 4 x i32> %b to <vscale x 4 x i64>
   %mul = mul <vscale x 4 x i64> %1, %2
-  %shr = lshr <vscale x 4 x i64> %mul, %splat
+  %shr = lshr <vscale x 4 x i64> %mul, splat(i64 32)
   %tr = trunc <vscale x 4 x i64> %shr to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %tr
 }
@@ -127,12 +113,10 @@ define <vscale x 2 x i64> @umulh_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    umulh z0.d, p0/m, z0.d, z1.d
 ; CHECK-NEXT:    ret
-  %insert = insertelement <vscale x 2 x i128> undef, i128 64, i64 0
-  %splat = shufflevector <vscale x 2 x i128> %insert, <vscale x 2 x i128> undef, <vscale x 2 x i32> zeroinitializer
   %1 = zext <vscale x 2 x i64> %a to <vscale x 2 x i128>
   %2 = zext <vscale x 2 x i64> %b to <vscale x 2 x i128>
   %mul = mul <vscale x 2 x i128> %1, %2
-  %shr = lshr <vscale x 2 x i128> %mul, %splat
+  %shr = lshr <vscale x 2 x i128> %mul, splat(i128 64)
   %tr = trunc <vscale x 2 x i128> %shr to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %tr
 }

diff  --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-fp-arith-imm.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-fp-arith-imm.ll
index eea6031fbd6b53..e1cb46e0d1dc00 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-fp-arith-imm.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-fp-arith-imm.ll
@@ -8,11 +8,9 @@ define <vscale x 8 x half> @fadd_h_immhalf(<vscale x 8 x i1> %pg, <vscale x 8 x
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fadd z0.h, p0/m, z0.h, #0.5
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x half> undef, half 0.500000e+00, i32 0
-  %splat = shufflevector <vscale x 8 x half> %elt, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x half> @llvm.aarch64.sve.fadd.nxv8f16(<vscale x 8 x i1> %pg,
-                                                            <vscale x 8 x half> %a,
-                                                            <vscale x 8 x half> %splat)
+                                                                 <vscale x 8 x half> %a,
+                                                                 <vscale x 8 x half> splat(half 0.500000e+00))
   ret <vscale x 8 x half> %out
 }
 
@@ -23,12 +21,10 @@ define <vscale x 8 x half> @fadd_h_immhalf_zero(<vscale x 8 x i1> %pg, <vscale x
 ; CHECK-NEXT:    movprfx z0.h, p0/z, z0.h
 ; CHECK-NEXT:    fadd z0.h, p0/m, z0.h, #0.5
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x half> undef, half 0.500000e+00, i32 0
-  %splat = shufflevector <vscale x 8 x half> %elt, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
   %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
   %out = call <vscale x 8 x half> @llvm.aarch64.sve.fadd.nxv8f16(<vscale x 8 x i1> %pg,
-                                                            <vscale x 8 x half> %a_z,
-                                                            <vscale x 8 x half> %splat)
+                                                                 <vscale x 8 x half> %a_z,
+                                                                 <vscale x 8 x half> splat(half 0.500000e+00))
   ret <vscale x 8 x half> %out
 }
 
@@ -37,11 +33,9 @@ define <vscale x 8 x half> @fadd_h_immone(<vscale x 8 x i1> %pg, <vscale x 8 x h
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fadd z0.h, p0/m, z0.h, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x half> undef, half 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 8 x half> %elt, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x half> @llvm.aarch64.sve.fadd.nxv8f16(<vscale x 8 x i1> %pg,
-                                                            <vscale x 8 x half> %a,
-                                                            <vscale x 8 x half> %splat)
+                                                                 <vscale x 8 x half> %a,
+                                                                 <vscale x 8 x half> splat(half 1.000000e+00))
   ret <vscale x 8 x half> %out
 }
 
@@ -51,12 +45,10 @@ define <vscale x 8 x half> @fadd_h_immone_zero(<vscale x 8 x i1> %pg, <vscale x
 ; CHECK-NEXT:    movprfx z0.h, p0/z, z0.h
 ; CHECK-NEXT:    fadd z0.h, p0/m, z0.h, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x half> undef, half 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 8 x half> %elt, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
   %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
   %out = call <vscale x 8 x half> @llvm.aarch64.sve.fadd.nxv8f16(<vscale x 8 x i1> %pg,
-                                                            <vscale x 8 x half> %a_z,
-                                                            <vscale x 8 x half> %splat)
+                                                                 <vscale x 8 x half> %a_z,
+                                                                 <vscale x 8 x half> splat(half 1.000000e+00))
   ret <vscale x 8 x half> %out
 }
 
@@ -65,11 +57,9 @@ define <vscale x 4 x float> @fadd_s_immhalf(<vscale x 4 x i1> %pg, <vscale x 4 x
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fadd z0.s, p0/m, z0.s, #0.5
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x float> undef, float 0.500000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x float> %elt, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x float> @llvm.aarch64.sve.fadd.nxv4f32(<vscale x 4 x i1> %pg,
-                                                             <vscale x 4 x float> %a,
-                                                             <vscale x 4 x float> %splat)
+                                                                  <vscale x 4 x float> %a,
+                                                                  <vscale x 4 x float> splat(float 0.500000e+00))
   ret <vscale x 4 x float> %out
 }
 
@@ -79,12 +69,10 @@ define <vscale x 4 x float> @fadd_s_immhalf_zero(<vscale x 4 x i1> %pg, <vscale
 ; CHECK-NEXT:    movprfx z0.s, p0/z, z0.s
 ; CHECK-NEXT:    fadd z0.s, p0/m, z0.s, #0.5
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x float> undef, float 0.500000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x float> %elt, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
   %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
   %out = call <vscale x 4 x float> @llvm.aarch64.sve.fadd.nxv4f32(<vscale x 4 x i1> %pg,
-                                                             <vscale x 4 x float> %a_z,
-                                                             <vscale x 4 x float> %splat)
+                                                                  <vscale x 4 x float> %a_z,
+                                                                  <vscale x 4 x float> splat(float 0.500000e+00))
   ret <vscale x 4 x float> %out
 }
 
@@ -93,11 +81,9 @@ define <vscale x 4 x float> @fadd_s_immone(<vscale x 4 x i1> %pg, <vscale x 4 x
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fadd z0.s, p0/m, z0.s, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x float> undef, float 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x float> %elt, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x float> @llvm.aarch64.sve.fadd.nxv4f32(<vscale x 4 x i1> %pg,
-                                                             <vscale x 4 x float> %a,
-                                                             <vscale x 4 x float> %splat)
+                                                                  <vscale x 4 x float> %a,
+                                                                  <vscale x 4 x float> splat(float 1.000000e+00))
   ret <vscale x 4 x float> %out
 }
 
@@ -107,12 +93,10 @@ define <vscale x 4 x float> @fadd_s_immone_zero(<vscale x 4 x i1> %pg, <vscale x
 ; CHECK-NEXT:    movprfx z0.s, p0/z, z0.s
 ; CHECK-NEXT:    fadd z0.s, p0/m, z0.s, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x float> undef, float 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x float> %elt, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
   %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
   %out = call <vscale x 4 x float> @llvm.aarch64.sve.fadd.nxv4f32(<vscale x 4 x i1> %pg,
-                                                             <vscale x 4 x float> %a_z,
-                                                             <vscale x 4 x float> %splat)
+                                                                  <vscale x 4 x float> %a_z,
+                                                                  <vscale x 4 x float> splat(float 1.000000e+00))
   ret <vscale x 4 x float> %out
 }
 
@@ -121,11 +105,9 @@ define <vscale x 2 x double> @fadd_d_immhalf(<vscale x 2 x i1> %pg, <vscale x 2
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fadd z0.d, p0/m, z0.d, #0.5
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x double> undef, double 0.500000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x double> %elt, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x double> @llvm.aarch64.sve.fadd.nxv2f64(<vscale x 2 x i1> %pg,
-                                                              <vscale x 2 x double> %a,
-                                                              <vscale x 2 x double> %splat)
+                                                                   <vscale x 2 x double> %a,
+                                                                   <vscale x 2 x double> splat(double 0.500000e+00))
   ret <vscale x 2 x double> %out
 }
 
@@ -135,12 +117,10 @@ define <vscale x 2 x double> @fadd_d_immhalf_zero(<vscale x 2 x i1> %pg, <vscale
 ; CHECK-NEXT:    movprfx z0.d, p0/z, z0.d
 ; CHECK-NEXT:    fadd z0.d, p0/m, z0.d, #0.5
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x double> undef, double 0.500000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x double> %elt, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
   %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
   %out = call <vscale x 2 x double> @llvm.aarch64.sve.fadd.nxv2f64(<vscale x 2 x i1> %pg,
-                                                              <vscale x 2 x double> %a_z,
-                                                              <vscale x 2 x double> %splat)
+                                                                   <vscale x 2 x double> %a_z,
+                                                                   <vscale x 2 x double> splat(double 0.500000e+00))
   ret <vscale x 2 x double> %out
 }
 
@@ -149,11 +129,9 @@ define <vscale x 2 x double> @fadd_d_immone(<vscale x 2 x i1> %pg, <vscale x 2 x
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fadd z0.d, p0/m, z0.d, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x double> undef, double 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x double> %elt, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x double> @llvm.aarch64.sve.fadd.nxv2f64(<vscale x 2 x i1> %pg,
-                                                              <vscale x 2 x double> %a,
-                                                              <vscale x 2 x double> %splat)
+                                                                   <vscale x 2 x double> %a,
+                                                                   <vscale x 2 x double> splat(double 1.000000e+00))
   ret <vscale x 2 x double> %out
 }
 
@@ -163,12 +141,10 @@ define <vscale x 2 x double> @fadd_d_immone_zero(<vscale x 2 x i1> %pg, <vscale
 ; CHECK-NEXT:    movprfx z0.d, p0/z, z0.d
 ; CHECK-NEXT:    fadd z0.d, p0/m, z0.d, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x double> undef, double 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x double> %elt, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
   %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
   %out = call <vscale x 2 x double> @llvm.aarch64.sve.fadd.nxv2f64(<vscale x 2 x i1> %pg,
-                                                              <vscale x 2 x double> %a_z,
-                                                              <vscale x 2 x double> %splat)
+                                                                   <vscale x 2 x double> %a_z,
+                                                                   <vscale x 2 x double> splat(double 1.000000e+00))
   ret <vscale x 2 x double> %out
 }
 
@@ -177,11 +153,9 @@ define <vscale x 8 x half> @fmax_h_immzero(<vscale x 8 x i1> %pg, <vscale x 8 x
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fmax z0.h, p0/m, z0.h, #0.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x half> undef, half 0.000000e+00, i32 0
-  %splat = shufflevector <vscale x 8 x half> %elt, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmax.nxv8f16(<vscale x 8 x i1> %pg,
-                                                            <vscale x 8 x half> %a,
-                                                            <vscale x 8 x half> %splat)
+                                                                 <vscale x 8 x half> %a,
+                                                                 <vscale x 8 x half> zeroinitializer)
   ret <vscale x 8 x half> %out
 }
 
@@ -191,12 +165,10 @@ define <vscale x 8 x half> @fmax_h_immzero_zero(<vscale x 8 x i1> %pg, <vscale x
 ; CHECK-NEXT:    movprfx z0.h, p0/z, z0.h
 ; CHECK-NEXT:    fmax z0.h, p0/m, z0.h, #0.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x half> undef, half 0.000000e+00, i32 0
-  %splat = shufflevector <vscale x 8 x half> %elt, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
   %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
   %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmax.nxv8f16(<vscale x 8 x i1> %pg,
-                                                            <vscale x 8 x half> %a_z,
-                                                            <vscale x 8 x half> %splat)
+                                                                 <vscale x 8 x half> %a_z,
+                                                                 <vscale x 8 x half> zeroinitializer)
   ret <vscale x 8 x half> %out
 }
 
@@ -205,11 +177,9 @@ define <vscale x 8 x half> @fmax_h_immone(<vscale x 8 x i1> %pg, <vscale x 8 x h
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fmax z0.h, p0/m, z0.h, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x half> undef, half 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 8 x half> %elt, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmax.nxv8f16(<vscale x 8 x i1> %pg,
-                                                            <vscale x 8 x half> %a,
-                                                            <vscale x 8 x half> %splat)
+                                                                 <vscale x 8 x half> %a,
+                                                                 <vscale x 8 x half> splat(half 1.000000e+00))
   ret <vscale x 8 x half> %out
 }
 
@@ -219,12 +189,10 @@ define <vscale x 8 x half> @fmax_h_immone_zero(<vscale x 8 x i1> %pg, <vscale x
 ; CHECK-NEXT:    movprfx z0.h, p0/z, z0.h
 ; CHECK-NEXT:    fmax z0.h, p0/m, z0.h, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x half> undef, half 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 8 x half> %elt, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
   %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
   %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmax.nxv8f16(<vscale x 8 x i1> %pg,
-                                                            <vscale x 8 x half> %a_z,
-                                                            <vscale x 8 x half> %splat)
+                                                                 <vscale x 8 x half> %a_z,
+                                                                 <vscale x 8 x half> splat(half 1.000000e+00))
   ret <vscale x 8 x half> %out
 }
 
@@ -233,11 +201,9 @@ define <vscale x 4 x float> @fmax_s_immzero(<vscale x 4 x i1> %pg, <vscale x 4 x
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fmax z0.s, p0/m, z0.s, #0.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x float> undef, float 0.000000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x float> %elt, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmax.nxv4f32(<vscale x 4 x i1> %pg,
-                                                             <vscale x 4 x float> %a,
-                                                             <vscale x 4 x float> %splat)
+                                                                  <vscale x 4 x float> %a,
+                                                                  <vscale x 4 x float> zeroinitializer)
   ret <vscale x 4 x float> %out
 }
 
@@ -247,12 +213,10 @@ define <vscale x 4 x float> @fmax_s_immzero_zero(<vscale x 4 x i1> %pg, <vscale
 ; CHECK-NEXT:    movprfx z0.s, p0/z, z0.s
 ; CHECK-NEXT:    fmax z0.s, p0/m, z0.s, #0.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x float> undef, float 0.000000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x float> %elt, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
   %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
   %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmax.nxv4f32(<vscale x 4 x i1> %pg,
-                                                             <vscale x 4 x float> %a_z,
-                                                             <vscale x 4 x float> %splat)
+                                                                  <vscale x 4 x float> %a_z,
+                                                                  <vscale x 4 x float> zeroinitializer)
   ret <vscale x 4 x float> %out
 }
 
@@ -261,11 +225,9 @@ define <vscale x 4 x float> @fmax_s_immone(<vscale x 4 x i1> %pg, <vscale x 4 x
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fmax z0.s, p0/m, z0.s, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x float> undef, float 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x float> %elt, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmax.nxv4f32(<vscale x 4 x i1> %pg,
-                                                             <vscale x 4 x float> %a,
-                                                             <vscale x 4 x float> %splat)
+                                                                  <vscale x 4 x float> %a,
+                                                                  <vscale x 4 x float> splat(float 1.000000e+00))
   ret <vscale x 4 x float> %out
 }
 
@@ -275,12 +237,10 @@ define <vscale x 4 x float> @fmax_s_immone_zero(<vscale x 4 x i1> %pg, <vscale x
 ; CHECK-NEXT:    movprfx z0.s, p0/z, z0.s
 ; CHECK-NEXT:    fmax z0.s, p0/m, z0.s, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x float> undef, float 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x float> %elt, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
   %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
   %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmax.nxv4f32(<vscale x 4 x i1> %pg,
-                                                             <vscale x 4 x float> %a_z,
-                                                             <vscale x 4 x float> %splat)
+                                                                  <vscale x 4 x float> %a_z,
+                                                                  <vscale x 4 x float> splat(float 1.000000e+00))
   ret <vscale x 4 x float> %out
 }
 
@@ -289,11 +249,9 @@ define <vscale x 2 x double> @fmax_d_immzero(<vscale x 2 x i1> %pg, <vscale x 2
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fmax z0.d, p0/m, z0.d, #0.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x double> undef, double 0.000000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x double> %elt, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmax.nxv2f64(<vscale x 2 x i1> %pg,
-                                                              <vscale x 2 x double> %a,
-                                                              <vscale x 2 x double> %splat)
+                                                                   <vscale x 2 x double> %a,
+                                                                   <vscale x 2 x double> zeroinitializer)
   ret <vscale x 2 x double> %out
 }
 
@@ -303,12 +261,10 @@ define <vscale x 2 x double> @fmax_d_immzero_zero(<vscale x 2 x i1> %pg, <vscale
 ; CHECK-NEXT:    movprfx z0.d, p0/z, z0.d
 ; CHECK-NEXT:    fmax z0.d, p0/m, z0.d, #0.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x double> undef, double 0.000000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x double> %elt, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
   %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
   %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmax.nxv2f64(<vscale x 2 x i1> %pg,
-                                                              <vscale x 2 x double> %a_z,
-                                                              <vscale x 2 x double> %splat)
+                                                                   <vscale x 2 x double> %a_z,
+                                                                   <vscale x 2 x double> zeroinitializer)
   ret <vscale x 2 x double> %out
 }
 
@@ -317,11 +273,9 @@ define <vscale x 2 x double> @fmax_d_immone(<vscale x 2 x i1> %pg, <vscale x 2 x
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fmax z0.d, p0/m, z0.d, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x double> undef, double 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x double> %elt, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmax.nxv2f64(<vscale x 2 x i1> %pg,
-                                                              <vscale x 2 x double> %a,
-                                                              <vscale x 2 x double> %splat)
+                                                                   <vscale x 2 x double> %a,
+                                                                   <vscale x 2 x double> splat(double 1.000000e+00))
   ret <vscale x 2 x double> %out
 }
 
@@ -331,12 +285,10 @@ define <vscale x 2 x double> @fmax_d_immone_zero(<vscale x 2 x i1> %pg, <vscale
 ; CHECK-NEXT:    movprfx z0.d, p0/z, z0.d
 ; CHECK-NEXT:    fmax z0.d, p0/m, z0.d, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x double> undef, double 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x double> %elt, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
   %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
   %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmax.nxv2f64(<vscale x 2 x i1> %pg,
-                                                              <vscale x 2 x double> %a_z,
-                                                              <vscale x 2 x double> %splat)
+                                                                   <vscale x 2 x double> %a_z,
+                                                                   <vscale x 2 x double> splat(double 1.000000e+00))
   ret <vscale x 2 x double> %out
 }
 
@@ -345,11 +297,9 @@ define <vscale x 8 x half> @fmaxnm_h_immzero(<vscale x 8 x i1> %pg, <vscale x 8
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fmaxnm z0.h, p0/m, z0.h, #0.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x half> undef, half 0.000000e+00, i32 0
-  %splat = shufflevector <vscale x 8 x half> %elt, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmaxnm.nxv8f16(<vscale x 8 x i1> %pg,
-                                                              <vscale x 8 x half> %a,
-                                                              <vscale x 8 x half> %splat)
+                                                                   <vscale x 8 x half> %a,
+                                                                   <vscale x 8 x half> zeroinitializer)
   ret <vscale x 8 x half> %out
 }
 
@@ -359,12 +309,10 @@ define <vscale x 8 x half> @fmaxnm_h_immzero_zero(<vscale x 8 x i1> %pg, <vscale
 ; CHECK-NEXT:    movprfx z0.h, p0/z, z0.h
 ; CHECK-NEXT:    fmaxnm z0.h, p0/m, z0.h, #0.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x half> undef, half 0.000000e+00, i32 0
-  %splat = shufflevector <vscale x 8 x half> %elt, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
   %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
   %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmaxnm.nxv8f16(<vscale x 8 x i1> %pg,
-                                                              <vscale x 8 x half> %a_z,
-                                                              <vscale x 8 x half> %splat)
+                                                                   <vscale x 8 x half> %a_z,
+                                                                   <vscale x 8 x half> zeroinitializer)
   ret <vscale x 8 x half> %out
 }
 
@@ -373,11 +321,9 @@ define <vscale x 8 x half> @fmaxnm_h_immone(<vscale x 8 x i1> %pg, <vscale x 8 x
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fmaxnm z0.h, p0/m, z0.h, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x half> undef, half 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 8 x half> %elt, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmaxnm.nxv8f16(<vscale x 8 x i1> %pg,
-                                                              <vscale x 8 x half> %a,
-                                                              <vscale x 8 x half> %splat)
+                                                                   <vscale x 8 x half> %a,
+                                                                   <vscale x 8 x half> splat(half 1.000000e+00))
   ret <vscale x 8 x half> %out
 }
 
@@ -387,12 +333,10 @@ define <vscale x 8 x half> @fmaxnm_h_immone_zero(<vscale x 8 x i1> %pg, <vscale
 ; CHECK-NEXT:    movprfx z0.h, p0/z, z0.h
 ; CHECK-NEXT:    fmaxnm z0.h, p0/m, z0.h, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x half> undef, half 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 8 x half> %elt, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
   %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
   %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmaxnm.nxv8f16(<vscale x 8 x i1> %pg,
-                                                              <vscale x 8 x half> %a_z,
-                                                              <vscale x 8 x half> %splat)
+                                                                   <vscale x 8 x half> %a_z,
+                                                                   <vscale x 8 x half> splat(half 1.000000e+00))
   ret <vscale x 8 x half> %out
 }
 
@@ -401,11 +345,9 @@ define <vscale x 4 x float> @fmaxnm_s_immzero(<vscale x 4 x i1> %pg, <vscale x 4
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fmaxnm z0.s, p0/m, z0.s, #0.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x float> undef, float 0.000000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x float> %elt, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmaxnm.nxv4f32(<vscale x 4 x i1> %pg,
-                                                               <vscale x 4 x float> %a,
-                                                               <vscale x 4 x float> %splat)
+                                                                    <vscale x 4 x float> %a,
+                                                                    <vscale x 4 x float> zeroinitializer)
   ret <vscale x 4 x float> %out
 }
 
@@ -415,12 +357,10 @@ define <vscale x 4 x float> @fmaxnm_s_immzero_zero(<vscale x 4 x i1> %pg, <vscal
 ; CHECK-NEXT:    movprfx z0.s, p0/z, z0.s
 ; CHECK-NEXT:    fmaxnm z0.s, p0/m, z0.s, #0.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x float> undef, float 0.000000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x float> %elt, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
   %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
   %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmaxnm.nxv4f32(<vscale x 4 x i1> %pg,
-                                                               <vscale x 4 x float> %a_z,
-                                                               <vscale x 4 x float> %splat)
+                                                                    <vscale x 4 x float> %a_z,
+                                                                    <vscale x 4 x float> zeroinitializer)
   ret <vscale x 4 x float> %out
 }
 
@@ -429,11 +369,9 @@ define <vscale x 4 x float> @fmaxnm_s_immone(<vscale x 4 x i1> %pg, <vscale x 4
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fmaxnm z0.s, p0/m, z0.s, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x float> undef, float 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x float> %elt, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmaxnm.nxv4f32(<vscale x 4 x i1> %pg,
-                                                               <vscale x 4 x float> %a,
-                                                               <vscale x 4 x float> %splat)
+                                                                    <vscale x 4 x float> %a,
+                                                                    <vscale x 4 x float> splat(float 1.000000e+00))
   ret <vscale x 4 x float> %out
 }
 
@@ -443,12 +381,10 @@ define <vscale x 4 x float> @fmaxnm_s_immone_zero(<vscale x 4 x i1> %pg, <vscale
 ; CHECK-NEXT:    movprfx z0.s, p0/z, z0.s
 ; CHECK-NEXT:    fmaxnm z0.s, p0/m, z0.s, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x float> undef, float 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x float> %elt, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
   %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
   %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmaxnm.nxv4f32(<vscale x 4 x i1> %pg,
-                                                               <vscale x 4 x float> %a_z,
-                                                               <vscale x 4 x float> %splat)
+                                                                    <vscale x 4 x float> %a_z,
+                                                                    <vscale x 4 x float> splat(float 1.000000e+00))
   ret <vscale x 4 x float> %out
 }
 
@@ -457,11 +393,9 @@ define <vscale x 2 x double> @fmaxnm_d_immzero(<vscale x 2 x i1> %pg, <vscale x
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fmaxnm z0.d, p0/m, z0.d, #0.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x double> undef, double 0.000000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x double> %elt, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmaxnm.nxv2f64(<vscale x 2 x i1> %pg,
-                                                                <vscale x 2 x double> %a,
-                                                                <vscale x 2 x double> %splat)
+                                                                     <vscale x 2 x double> %a,
+                                                                     <vscale x 2 x double> zeroinitializer)
   ret <vscale x 2 x double> %out
 }
 
@@ -471,12 +405,10 @@ define <vscale x 2 x double> @fmaxnm_d_immzero_zero(<vscale x 2 x i1> %pg, <vsca
 ; CHECK-NEXT:    movprfx z0.d, p0/z, z0.d
 ; CHECK-NEXT:    fmaxnm z0.d, p0/m, z0.d, #0.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x double> undef, double 0.000000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x double> %elt, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
   %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
   %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmaxnm.nxv2f64(<vscale x 2 x i1> %pg,
-                                                                <vscale x 2 x double> %a_z,
-                                                                <vscale x 2 x double> %splat)
+                                                                     <vscale x 2 x double> %a_z,
+                                                                     <vscale x 2 x double> zeroinitializer)
   ret <vscale x 2 x double> %out
 }
 
@@ -485,11 +417,9 @@ define <vscale x 2 x double> @fmaxnm_d_immone(<vscale x 2 x i1> %pg, <vscale x 2
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fmaxnm z0.d, p0/m, z0.d, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x double> undef, double 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x double> %elt, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmaxnm.nxv2f64(<vscale x 2 x i1> %pg,
-                                                                <vscale x 2 x double> %a,
-                                                                <vscale x 2 x double> %splat)
+                                                                     <vscale x 2 x double> %a,
+                                                                     <vscale x 2 x double> splat(double 1.000000e+00))
   ret <vscale x 2 x double> %out
 }
 
@@ -499,12 +429,10 @@ define <vscale x 2 x double> @fmaxnm_d_immone_zero(<vscale x 2 x i1> %pg, <vscal
 ; CHECK-NEXT:    movprfx z0.d, p0/z, z0.d
 ; CHECK-NEXT:    fmaxnm z0.d, p0/m, z0.d, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x double> undef, double 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x double> %elt, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
   %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
   %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmaxnm.nxv2f64(<vscale x 2 x i1> %pg,
-                                                                <vscale x 2 x double> %a_z,
-                                                                <vscale x 2 x double> %splat)
+                                                                     <vscale x 2 x double> %a_z,
+                                                                     <vscale x 2 x double> splat(double 1.000000e+00))
   ret <vscale x 2 x double> %out
 }
 
@@ -513,11 +441,9 @@ define <vscale x 8 x half> @fmin_h_immzero(<vscale x 8 x i1> %pg, <vscale x 8 x
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fmin z0.h, p0/m, z0.h, #0.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x half> undef, half 0.000000e+00, i32 0
-  %splat = shufflevector <vscale x 8 x half> %elt, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmin.nxv8f16(<vscale x 8 x i1> %pg,
-                                                            <vscale x 8 x half> %a,
-                                                            <vscale x 8 x half> %splat)
+                                                                 <vscale x 8 x half> %a,
+                                                                 <vscale x 8 x half> zeroinitializer)
   ret <vscale x 8 x half> %out
 }
 
@@ -527,12 +453,10 @@ define <vscale x 8 x half> @fmin_h_immzero_zero(<vscale x 8 x i1> %pg, <vscale x
 ; CHECK-NEXT:    movprfx z0.h, p0/z, z0.h
 ; CHECK-NEXT:    fmin z0.h, p0/m, z0.h, #0.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x half> undef, half 0.000000e+00, i32 0
-  %splat = shufflevector <vscale x 8 x half> %elt, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
   %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
   %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmin.nxv8f16(<vscale x 8 x i1> %pg,
-                                                            <vscale x 8 x half> %a_z,
-                                                            <vscale x 8 x half> %splat)
+                                                                 <vscale x 8 x half> %a_z,
+                                                                 <vscale x 8 x half> zeroinitializer)
   ret <vscale x 8 x half> %out
 }
 
@@ -541,11 +465,9 @@ define <vscale x 8 x half> @fmin_h_immone(<vscale x 8 x i1> %pg, <vscale x 8 x h
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fmin z0.h, p0/m, z0.h, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x half> undef, half 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 8 x half> %elt, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmin.nxv8f16(<vscale x 8 x i1> %pg,
-                                                            <vscale x 8 x half> %a,
-                                                            <vscale x 8 x half> %splat)
+                                                                 <vscale x 8 x half> %a,
+                                                                 <vscale x 8 x half> splat(half 1.000000e+00))
   ret <vscale x 8 x half> %out
 }
 
@@ -555,12 +477,10 @@ define <vscale x 8 x half> @fmin_h_immone_zero(<vscale x 8 x i1> %pg, <vscale x
 ; CHECK-NEXT:    movprfx z0.h, p0/z, z0.h
 ; CHECK-NEXT:    fmin z0.h, p0/m, z0.h, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x half> undef, half 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 8 x half> %elt, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
   %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
   %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmin.nxv8f16(<vscale x 8 x i1> %pg,
-                                                            <vscale x 8 x half> %a_z,
-                                                            <vscale x 8 x half> %splat)
+                                                                 <vscale x 8 x half> %a_z,
+                                                                 <vscale x 8 x half> splat(half 1.000000e+00))
   ret <vscale x 8 x half> %out
 }
 
@@ -569,11 +489,9 @@ define <vscale x 4 x float> @fmin_s_immzero(<vscale x 4 x i1> %pg, <vscale x 4 x
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fmin z0.s, p0/m, z0.s, #0.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x float> undef, float 0.000000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x float> %elt, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmin.nxv4f32(<vscale x 4 x i1> %pg,
-                                                             <vscale x 4 x float> %a,
-                                                             <vscale x 4 x float> %splat)
+                                                                  <vscale x 4 x float> %a,
+                                                                  <vscale x 4 x float> zeroinitializer)
   ret <vscale x 4 x float> %out
 }
 
@@ -583,12 +501,10 @@ define <vscale x 4 x float> @fmin_s_immzero_zero(<vscale x 4 x i1> %pg, <vscale
 ; CHECK-NEXT:    movprfx z0.s, p0/z, z0.s
 ; CHECK-NEXT:    fmin z0.s, p0/m, z0.s, #0.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x float> undef, float 0.000000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x float> %elt, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
   %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
   %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmin.nxv4f32(<vscale x 4 x i1> %pg,
-                                                             <vscale x 4 x float> %a_z,
-                                                             <vscale x 4 x float> %splat)
+                                                                  <vscale x 4 x float> %a_z,
+                                                                  <vscale x 4 x float> zeroinitializer)
   ret <vscale x 4 x float> %out
 }
 
@@ -597,11 +513,9 @@ define <vscale x 4 x float> @fmin_s_immone(<vscale x 4 x i1> %pg, <vscale x 4 x
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fmin z0.s, p0/m, z0.s, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x float> undef, float 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x float> %elt, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmin.nxv4f32(<vscale x 4 x i1> %pg,
-                                                             <vscale x 4 x float> %a,
-                                                             <vscale x 4 x float> %splat)
+                                                                  <vscale x 4 x float> %a,
+                                                                  <vscale x 4 x float> splat(float 1.000000e+00))
   ret <vscale x 4 x float> %out
 }
 
@@ -611,12 +525,10 @@ define <vscale x 4 x float> @fmin_s_immone_zero(<vscale x 4 x i1> %pg, <vscale x
 ; CHECK-NEXT:    movprfx z0.s, p0/z, z0.s
 ; CHECK-NEXT:    fmin z0.s, p0/m, z0.s, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x float> undef, float 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x float> %elt, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
   %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
   %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmin.nxv4f32(<vscale x 4 x i1> %pg,
-                                                             <vscale x 4 x float> %a_z,
-                                                             <vscale x 4 x float> %splat)
+                                                                  <vscale x 4 x float> %a_z,
+                                                                  <vscale x 4 x float> splat(float 1.000000e+00))
   ret <vscale x 4 x float> %out
 }
 
@@ -625,11 +537,9 @@ define <vscale x 2 x double> @fmin_d_immzero(<vscale x 2 x i1> %pg, <vscale x 2
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fmin z0.d, p0/m, z0.d, #0.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x double> undef, double 0.000000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x double> %elt, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmin.nxv2f64(<vscale x 2 x i1> %pg,
-                                                              <vscale x 2 x double> %a,
-                                                              <vscale x 2 x double> %splat)
+                                                                   <vscale x 2 x double> %a,
+                                                                   <vscale x 2 x double> zeroinitializer)
   ret <vscale x 2 x double> %out
 }
 
@@ -639,12 +549,10 @@ define <vscale x 2 x double> @fmin_d_immzero_zero(<vscale x 2 x i1> %pg, <vscale
 ; CHECK-NEXT:    movprfx z0.d, p0/z, z0.d
 ; CHECK-NEXT:    fmin z0.d, p0/m, z0.d, #0.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x double> undef, double 0.000000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x double> %elt, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
   %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
   %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmin.nxv2f64(<vscale x 2 x i1> %pg,
-                                                              <vscale x 2 x double> %a_z,
-                                                              <vscale x 2 x double> %splat)
+                                                                   <vscale x 2 x double> %a_z,
+                                                                   <vscale x 2 x double> zeroinitializer)
   ret <vscale x 2 x double> %out
 }
 
@@ -653,11 +561,9 @@ define <vscale x 2 x double> @fmin_d_immone(<vscale x 2 x i1> %pg, <vscale x 2 x
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fmin z0.d, p0/m, z0.d, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x double> undef, double 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x double> %elt, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmin.nxv2f64(<vscale x 2 x i1> %pg,
-                                                              <vscale x 2 x double> %a,
-                                                              <vscale x 2 x double> %splat)
+                                                                   <vscale x 2 x double> %a,
+                                                                   <vscale x 2 x double> splat(double 1.000000e+00))
   ret <vscale x 2 x double> %out
 }
 
@@ -667,12 +573,10 @@ define <vscale x 2 x double> @fmin_d_immone_zero(<vscale x 2 x i1> %pg, <vscale
 ; CHECK-NEXT:    movprfx z0.d, p0/z, z0.d
 ; CHECK-NEXT:    fmin z0.d, p0/m, z0.d, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x double> undef, double 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x double> %elt, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
   %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
   %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmin.nxv2f64(<vscale x 2 x i1> %pg,
-                                                              <vscale x 2 x double> %a_z,
-                                                              <vscale x 2 x double> %splat)
+                                                                   <vscale x 2 x double> %a_z,
+                                                                   <vscale x 2 x double> splat(double 1.000000e+00))
   ret <vscale x 2 x double> %out
 }
 
@@ -681,11 +585,9 @@ define <vscale x 8 x half> @fminnm_h_immzero(<vscale x 8 x i1> %pg, <vscale x 8
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fminnm z0.h, p0/m, z0.h, #0.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x half> undef, half 0.000000e+00, i32 0
-  %splat = shufflevector <vscale x 8 x half> %elt, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x half> @llvm.aarch64.sve.fminnm.nxv8f16(<vscale x 8 x i1> %pg,
-                                                              <vscale x 8 x half> %a,
-                                                              <vscale x 8 x half> %splat)
+                                                                   <vscale x 8 x half> %a,
+                                                                   <vscale x 8 x half> zeroinitializer)
   ret <vscale x 8 x half> %out
 }
 
@@ -695,12 +597,10 @@ define <vscale x 8 x half> @fminnm_h_immzero_zero(<vscale x 8 x i1> %pg, <vscale
 ; CHECK-NEXT:    movprfx z0.h, p0/z, z0.h
 ; CHECK-NEXT:    fminnm z0.h, p0/m, z0.h, #0.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x half> undef, half 0.000000e+00, i32 0
-  %splat = shufflevector <vscale x 8 x half> %elt, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
   %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
   %out = call <vscale x 8 x half> @llvm.aarch64.sve.fminnm.nxv8f16(<vscale x 8 x i1> %pg,
-                                                              <vscale x 8 x half> %a_z,
-                                                              <vscale x 8 x half> %splat)
+                                                                   <vscale x 8 x half> %a_z,
+                                                                   <vscale x 8 x half> zeroinitializer)
   ret <vscale x 8 x half> %out
 }
 
@@ -709,11 +609,9 @@ define <vscale x 8 x half> @fminnm_h_immone(<vscale x 8 x i1> %pg, <vscale x 8 x
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fminnm z0.h, p0/m, z0.h, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x half> undef, half 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 8 x half> %elt, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x half> @llvm.aarch64.sve.fminnm.nxv8f16(<vscale x 8 x i1> %pg,
-                                                              <vscale x 8 x half> %a,
-                                                              <vscale x 8 x half> %splat)
+                                                                   <vscale x 8 x half> %a,
+                                                                   <vscale x 8 x half> splat(half 1.000000e+00))
   ret <vscale x 8 x half> %out
 }
 
@@ -723,12 +621,10 @@ define <vscale x 8 x half> @fminnm_h_immone_zero(<vscale x 8 x i1> %pg, <vscale
 ; CHECK-NEXT:    movprfx z0.h, p0/z, z0.h
 ; CHECK-NEXT:    fminnm z0.h, p0/m, z0.h, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x half> undef, half 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 8 x half> %elt, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
   %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
   %out = call <vscale x 8 x half> @llvm.aarch64.sve.fminnm.nxv8f16(<vscale x 8 x i1> %pg,
-                                                              <vscale x 8 x half> %a_z,
-                                                              <vscale x 8 x half> %splat)
+                                                                   <vscale x 8 x half> %a_z,
+                                                                   <vscale x 8 x half> splat(half 1.000000e+00))
   ret <vscale x 8 x half> %out
 }
 
@@ -737,11 +633,9 @@ define <vscale x 4 x float> @fminnm_s_immzero(<vscale x 4 x i1> %pg, <vscale x 4
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fminnm z0.s, p0/m, z0.s, #0.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x float> undef, float 0.000000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x float> %elt, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x float> @llvm.aarch64.sve.fminnm.nxv4f32(<vscale x 4 x i1> %pg,
-                                                               <vscale x 4 x float> %a,
-                                                               <vscale x 4 x float> %splat)
+                                                                    <vscale x 4 x float> %a,
+                                                                    <vscale x 4 x float> zeroinitializer)
   ret <vscale x 4 x float> %out
 }
 
@@ -751,12 +645,10 @@ define <vscale x 4 x float> @fminnm_s_immzero_zero(<vscale x 4 x i1> %pg, <vscal
 ; CHECK-NEXT:    movprfx z0.s, p0/z, z0.s
 ; CHECK-NEXT:    fminnm z0.s, p0/m, z0.s, #0.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x float> undef, float 0.000000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x float> %elt, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
   %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
   %out = call <vscale x 4 x float> @llvm.aarch64.sve.fminnm.nxv4f32(<vscale x 4 x i1> %pg,
-                                                               <vscale x 4 x float> %a_z,
-                                                               <vscale x 4 x float> %splat)
+                                                                    <vscale x 4 x float> %a_z,
+                                                                    <vscale x 4 x float> zeroinitializer)
   ret <vscale x 4 x float> %out
 }
 
@@ -765,11 +657,9 @@ define <vscale x 4 x float> @fminnm_s_immone(<vscale x 4 x i1> %pg, <vscale x 4
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fminnm z0.s, p0/m, z0.s, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x float> undef, float 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x float> %elt, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x float> @llvm.aarch64.sve.fminnm.nxv4f32(<vscale x 4 x i1> %pg,
-                                                               <vscale x 4 x float> %a,
-                                                               <vscale x 4 x float> %splat)
+                                                                    <vscale x 4 x float> %a,
+                                                                    <vscale x 4 x float> splat(float 1.000000e+00))
   ret <vscale x 4 x float> %out
 }
 
@@ -779,12 +669,10 @@ define <vscale x 4 x float> @fminnm_s_immone_zero(<vscale x 4 x i1> %pg, <vscale
 ; CHECK-NEXT:    movprfx z0.s, p0/z, z0.s
 ; CHECK-NEXT:    fminnm z0.s, p0/m, z0.s, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x float> undef, float 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x float> %elt, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
   %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
   %out = call <vscale x 4 x float> @llvm.aarch64.sve.fminnm.nxv4f32(<vscale x 4 x i1> %pg,
-                                                               <vscale x 4 x float> %a_z,
-                                                               <vscale x 4 x float> %splat)
+                                                                    <vscale x 4 x float> %a_z,
+                                                                    <vscale x 4 x float> splat(float 1.000000e+00))
   ret <vscale x 4 x float> %out
 }
 
@@ -793,11 +681,9 @@ define <vscale x 2 x double> @fminnm_d_immzero(<vscale x 2 x i1> %pg, <vscale x
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fminnm z0.d, p0/m, z0.d, #0.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x double> undef, double 0.000000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x double> %elt, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x double> @llvm.aarch64.sve.fminnm.nxv2f64(<vscale x 2 x i1> %pg,
-                                                                <vscale x 2 x double> %a,
-                                                                <vscale x 2 x double> %splat)
+                                                                     <vscale x 2 x double> %a,
+                                                                     <vscale x 2 x double> zeroinitializer)
   ret <vscale x 2 x double> %out
 }
 
@@ -807,12 +693,10 @@ define <vscale x 2 x double> @fminnm_d_immzero_zero(<vscale x 2 x i1> %pg, <vsca
 ; CHECK-NEXT:    movprfx z0.d, p0/z, z0.d
 ; CHECK-NEXT:    fminnm z0.d, p0/m, z0.d, #0.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x double> undef, double 0.000000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x double> %elt, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
   %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
   %out = call <vscale x 2 x double> @llvm.aarch64.sve.fminnm.nxv2f64(<vscale x 2 x i1> %pg,
-                                                                <vscale x 2 x double> %a_z,
-                                                                <vscale x 2 x double> %splat)
+                                                                     <vscale x 2 x double> %a_z,
+                                                                     <vscale x 2 x double> zeroinitializer)
   ret <vscale x 2 x double> %out
 }
 
@@ -821,11 +705,9 @@ define <vscale x 2 x double> @fminnm_d_immone(<vscale x 2 x i1> %pg, <vscale x 2
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fminnm z0.d, p0/m, z0.d, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x double> undef, double 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x double> %elt, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x double> @llvm.aarch64.sve.fminnm.nxv2f64(<vscale x 2 x i1> %pg,
-                                                                <vscale x 2 x double> %a,
-                                                                <vscale x 2 x double> %splat)
+                                                                     <vscale x 2 x double> %a,
+                                                                     <vscale x 2 x double> splat(double 1.000000e+00))
   ret <vscale x 2 x double> %out
 }
 
@@ -835,12 +717,10 @@ define <vscale x 2 x double> @fminnm_d_immone_zero(<vscale x 2 x i1> %pg, <vscal
 ; CHECK-NEXT:    movprfx z0.d, p0/z, z0.d
 ; CHECK-NEXT:    fminnm z0.d, p0/m, z0.d, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x double> undef, double 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x double> %elt, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
   %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
   %out = call <vscale x 2 x double> @llvm.aarch64.sve.fminnm.nxv2f64(<vscale x 2 x i1> %pg,
-                                                                <vscale x 2 x double> %a_z,
-                                                                <vscale x 2 x double> %splat)
+                                                                     <vscale x 2 x double> %a_z,
+                                                                     <vscale x 2 x double> splat(double 1.000000e+00))
   ret <vscale x 2 x double> %out
 }
 
@@ -849,11 +729,9 @@ define <vscale x 8 x half> @fmul_h_immhalf(<vscale x 8 x i1> %pg, <vscale x 8 x
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fmul z0.h, p0/m, z0.h, #0.5
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x half> undef, half 0.500000e+00, i32 0
-  %splat = shufflevector <vscale x 8 x half> %elt, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmul.nxv8f16(<vscale x 8 x i1> %pg,
-                                                            <vscale x 8 x half> %a,
-                                                            <vscale x 8 x half> %splat)
+                                                                 <vscale x 8 x half> %a,
+                                                                 <vscale x 8 x half> splat(half 0.500000e+00))
   ret <vscale x 8 x half> %out
 }
 
@@ -863,12 +741,10 @@ define <vscale x 8 x half> @fmul_h_immhalf_zero(<vscale x 8 x i1> %pg, <vscale x
 ; CHECK-NEXT:    movprfx z0.h, p0/z, z0.h
 ; CHECK-NEXT:    fmul z0.h, p0/m, z0.h, #0.5
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x half> undef, half 0.500000e+00, i32 0
-  %splat = shufflevector <vscale x 8 x half> %elt, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
   %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
   %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmul.nxv8f16(<vscale x 8 x i1> %pg,
-                                                            <vscale x 8 x half> %a_z,
-                                                            <vscale x 8 x half> %splat)
+                                                                 <vscale x 8 x half> %a_z,
+                                                                 <vscale x 8 x half> splat(half 0.500000e+00))
   ret <vscale x 8 x half> %out
 }
 
@@ -877,11 +753,9 @@ define <vscale x 8 x half> @fmul_h_immtwo(<vscale x 8 x i1> %pg, <vscale x 8 x h
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fmul z0.h, p0/m, z0.h, #2.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x half> undef, half 2.000000e+00, i32 0
-  %splat = shufflevector <vscale x 8 x half> %elt, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmul.nxv8f16(<vscale x 8 x i1> %pg,
-                                                            <vscale x 8 x half> %a,
-                                                            <vscale x 8 x half> %splat)
+                                                                 <vscale x 8 x half> %a,
+                                                                 <vscale x 8 x half> splat(half 2.000000e+00))
   ret <vscale x 8 x half> %out
 }
 
@@ -891,12 +765,10 @@ define <vscale x 8 x half> @fmul_h_immtwo_zero(<vscale x 8 x i1> %pg, <vscale x
 ; CHECK-NEXT:    movprfx z0.h, p0/z, z0.h
 ; CHECK-NEXT:    fmul z0.h, p0/m, z0.h, #2.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x half> undef, half 2.000000e+00, i32 0
-  %splat = shufflevector <vscale x 8 x half> %elt, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
   %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
   %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmul.nxv8f16(<vscale x 8 x i1> %pg,
-                                                            <vscale x 8 x half> %a_z,
-                                                            <vscale x 8 x half> %splat)
+                                                                 <vscale x 8 x half> %a_z,
+                                                                 <vscale x 8 x half> splat(half 2.000000e+00))
   ret <vscale x 8 x half> %out
 }
 
@@ -905,11 +777,9 @@ define <vscale x 4 x float> @fmul_s_immhalf(<vscale x 4 x i1> %pg, <vscale x 4 x
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fmul z0.s, p0/m, z0.s, #0.5
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x float> undef, float 0.500000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x float> %elt, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmul.nxv4f32(<vscale x 4 x i1> %pg,
-                                                             <vscale x 4 x float> %a,
-                                                             <vscale x 4 x float> %splat)
+                                                                  <vscale x 4 x float> %a,
+                                                                  <vscale x 4 x float> splat(float 0.500000e+00))
   ret <vscale x 4 x float> %out
 }
 
@@ -919,12 +789,10 @@ define <vscale x 4 x float> @fmul_s_immhalf_zero(<vscale x 4 x i1> %pg, <vscale
 ; CHECK-NEXT:    movprfx z0.s, p0/z, z0.s
 ; CHECK-NEXT:    fmul z0.s, p0/m, z0.s, #0.5
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x float> undef, float 0.500000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x float> %elt, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
   %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
   %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmul.nxv4f32(<vscale x 4 x i1> %pg,
-                                                             <vscale x 4 x float> %a_z,
-                                                             <vscale x 4 x float> %splat)
+                                                                  <vscale x 4 x float> %a_z,
+                                                                  <vscale x 4 x float> splat(float 0.500000e+00))
   ret <vscale x 4 x float> %out
 }
 
@@ -933,11 +801,9 @@ define <vscale x 4 x float> @fmul_s_immtwo(<vscale x 4 x i1> %pg, <vscale x 4 x
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fmul z0.s, p0/m, z0.s, #2.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x float> undef, float 2.000000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x float> %elt, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmul.nxv4f32(<vscale x 4 x i1> %pg,
-                                                             <vscale x 4 x float> %a,
-                                                             <vscale x 4 x float> %splat)
+                                                                  <vscale x 4 x float> %a,
+                                                                  <vscale x 4 x float> splat(float 2.000000e+00))
   ret <vscale x 4 x float> %out
 }
 
@@ -947,12 +813,10 @@ define <vscale x 4 x float> @fmul_s_immtwo_zero(<vscale x 4 x i1> %pg, <vscale x
 ; CHECK-NEXT:    movprfx z0.s, p0/z, z0.s
 ; CHECK-NEXT:    fmul z0.s, p0/m, z0.s, #2.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x float> undef, float 2.000000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x float> %elt, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
   %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
   %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmul.nxv4f32(<vscale x 4 x i1> %pg,
-                                                             <vscale x 4 x float> %a_z,
-                                                             <vscale x 4 x float> %splat)
+                                                                  <vscale x 4 x float> %a_z,
+                                                                  <vscale x 4 x float> splat(float 2.000000e+00))
   ret <vscale x 4 x float> %out
 }
 
@@ -961,11 +825,9 @@ define <vscale x 2 x double> @fmul_d_immhalf(<vscale x 2 x i1> %pg, <vscale x 2
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fmul z0.d, p0/m, z0.d, #0.5
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x double> undef, double 0.500000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x double> %elt, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmul.nxv2f64(<vscale x 2 x i1> %pg,
-                                                              <vscale x 2 x double> %a,
-                                                              <vscale x 2 x double> %splat)
+                                                                   <vscale x 2 x double> %a,
+                                                                   <vscale x 2 x double> splat(double 0.500000e+00))
   ret <vscale x 2 x double> %out
 }
 
@@ -975,12 +837,10 @@ define <vscale x 2 x double> @fmul_d_immhalf_zero(<vscale x 2 x i1> %pg, <vscale
 ; CHECK-NEXT:    movprfx z0.d, p0/z, z0.d
 ; CHECK-NEXT:    fmul z0.d, p0/m, z0.d, #0.5
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x double> undef, double 0.500000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x double> %elt, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
   %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
   %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmul.nxv2f64(<vscale x 2 x i1> %pg,
-                                                              <vscale x 2 x double> %a_z,
-                                                              <vscale x 2 x double> %splat)
+                                                                   <vscale x 2 x double> %a_z,
+                                                                   <vscale x 2 x double> splat(double 0.500000e+00))
   ret <vscale x 2 x double> %out
 }
 
@@ -989,11 +849,9 @@ define <vscale x 2 x double> @fmul_d_immtwo(<vscale x 2 x i1> %pg, <vscale x 2 x
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fmul z0.d, p0/m, z0.d, #2.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x double> undef, double 2.000000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x double> %elt, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmul.nxv2f64(<vscale x 2 x i1> %pg,
-                                                              <vscale x 2 x double> %a,
-                                                              <vscale x 2 x double> %splat)
+                                                                   <vscale x 2 x double> %a,
+                                                                   <vscale x 2 x double> splat(double 2.000000e+00))
   ret <vscale x 2 x double> %out
 }
 
@@ -1003,12 +861,10 @@ define <vscale x 2 x double> @fmul_d_immtwo_zero(<vscale x 2 x i1> %pg, <vscale
 ; CHECK-NEXT:    movprfx z0.d, p0/z, z0.d
 ; CHECK-NEXT:    fmul z0.d, p0/m, z0.d, #2.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x double> undef, double 2.000000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x double> %elt, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
   %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
   %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmul.nxv2f64(<vscale x 2 x i1> %pg,
-                                                              <vscale x 2 x double> %a_z,
-                                                              <vscale x 2 x double> %splat)
+                                                                   <vscale x 2 x double> %a_z,
+                                                                   <vscale x 2 x double> splat(double 2.000000e+00))
   ret <vscale x 2 x double> %out
 }
 
@@ -1017,11 +873,9 @@ define <vscale x 8 x half> @fsub_h_immhalf(<vscale x 8 x i1> %pg, <vscale x 8 x
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fsub z0.h, p0/m, z0.h, #0.5
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x half> undef, half 0.500000e+00, i32 0
-  %splat = shufflevector <vscale x 8 x half> %elt, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x half> @llvm.aarch64.sve.fsub.nxv8f16(<vscale x 8 x i1> %pg,
-                                                            <vscale x 8 x half> %a,
-                                                            <vscale x 8 x half> %splat)
+                                                                 <vscale x 8 x half> %a,
+                                                                 <vscale x 8 x half> splat(half 0.500000e+00))
   ret <vscale x 8 x half> %out
 }
 
@@ -1031,12 +885,10 @@ define <vscale x 8 x half> @fsub_h_immhalf_zero(<vscale x 8 x i1> %pg, <vscale x
 ; CHECK-NEXT:    movprfx z0.h, p0/z, z0.h
 ; CHECK-NEXT:    fsub z0.h, p0/m, z0.h, #0.5
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x half> undef, half 0.500000e+00, i32 0
-  %splat = shufflevector <vscale x 8 x half> %elt, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
   %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
   %out = call <vscale x 8 x half> @llvm.aarch64.sve.fsub.nxv8f16(<vscale x 8 x i1> %pg,
-                                                            <vscale x 8 x half> %a_z,
-                                                            <vscale x 8 x half> %splat)
+                                                                 <vscale x 8 x half> %a_z,
+                                                                 <vscale x 8 x half> splat(half 0.500000e+00))
   ret <vscale x 8 x half> %out
 }
 
@@ -1045,11 +897,9 @@ define <vscale x 8 x half> @fsub_h_immone(<vscale x 8 x i1> %pg, <vscale x 8 x h
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fsub z0.h, p0/m, z0.h, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x half> undef, half 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 8 x half> %elt, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x half> @llvm.aarch64.sve.fsub.nxv8f16(<vscale x 8 x i1> %pg,
-                                                            <vscale x 8 x half> %a,
-                                                            <vscale x 8 x half> %splat)
+                                                                 <vscale x 8 x half> %a,
+                                                                 <vscale x 8 x half> splat(half 1.000000e+00))
   ret <vscale x 8 x half> %out
 }
 
@@ -1059,12 +909,10 @@ define <vscale x 8 x half> @fsub_h_immone_zero(<vscale x 8 x i1> %pg, <vscale x
 ; CHECK-NEXT:    movprfx z0.h, p0/z, z0.h
 ; CHECK-NEXT:    fsub z0.h, p0/m, z0.h, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x half> undef, half 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 8 x half> %elt, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
   %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
   %out = call <vscale x 8 x half> @llvm.aarch64.sve.fsub.nxv8f16(<vscale x 8 x i1> %pg,
-                                                            <vscale x 8 x half> %a_z,
-                                                            <vscale x 8 x half> %splat)
+                                                                 <vscale x 8 x half> %a_z,
+                                                                 <vscale x 8 x half> splat(half 1.000000e+00))
   ret <vscale x 8 x half> %out
 }
 
@@ -1073,11 +921,9 @@ define <vscale x 4 x float> @fsub_s_immhalf(<vscale x 4 x i1> %pg, <vscale x 4 x
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fsub z0.s, p0/m, z0.s, #0.5
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x float> undef, float 0.500000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x float> %elt, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x float> @llvm.aarch64.sve.fsub.nxv4f32(<vscale x 4 x i1> %pg,
-                                                             <vscale x 4 x float> %a,
-                                                             <vscale x 4 x float> %splat)
+                                                                  <vscale x 4 x float> %a,
+                                                                  <vscale x 4 x float> splat(float 0.500000e+00))
   ret <vscale x 4 x float> %out
 }
 
@@ -1087,12 +933,10 @@ define <vscale x 4 x float> @fsub_s_immhalf_zero(<vscale x 4 x i1> %pg, <vscale
 ; CHECK-NEXT:    movprfx z0.s, p0/z, z0.s
 ; CHECK-NEXT:    fsub z0.s, p0/m, z0.s, #0.5
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x float> undef, float 0.500000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x float> %elt, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
   %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
   %out = call <vscale x 4 x float> @llvm.aarch64.sve.fsub.nxv4f32(<vscale x 4 x i1> %pg,
-                                                             <vscale x 4 x float> %a_z,
-                                                             <vscale x 4 x float> %splat)
+                                                                  <vscale x 4 x float> %a_z,
+                                                                  <vscale x 4 x float> splat(float 0.500000e+00))
   ret <vscale x 4 x float> %out
 }
 
@@ -1101,11 +945,9 @@ define <vscale x 4 x float> @fsub_s_immone(<vscale x 4 x i1> %pg, <vscale x 4 x
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fsub z0.s, p0/m, z0.s, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x float> undef, float 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x float> %elt, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x float> @llvm.aarch64.sve.fsub.nxv4f32(<vscale x 4 x i1> %pg,
-                                                             <vscale x 4 x float> %a,
-                                                             <vscale x 4 x float> %splat)
+                                                                  <vscale x 4 x float> %a,
+                                                                  <vscale x 4 x float> splat(float 1.000000e+00))
   ret <vscale x 4 x float> %out
 }
 
@@ -1115,12 +957,10 @@ define <vscale x 4 x float> @fsub_s_immone_zero(<vscale x 4 x i1> %pg, <vscale x
 ; CHECK-NEXT:    movprfx z0.s, p0/z, z0.s
 ; CHECK-NEXT:    fsub z0.s, p0/m, z0.s, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x float> undef, float 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x float> %elt, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
   %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
   %out = call <vscale x 4 x float> @llvm.aarch64.sve.fsub.nxv4f32(<vscale x 4 x i1> %pg,
-                                                             <vscale x 4 x float> %a_z,
-                                                             <vscale x 4 x float> %splat)
+                                                                  <vscale x 4 x float> %a_z,
+                                                                  <vscale x 4 x float> splat(float 1.000000e+00))
   ret <vscale x 4 x float> %out
 }
 
@@ -1129,11 +969,9 @@ define <vscale x 2 x double> @fsub_d_immhalf(<vscale x 2 x i1> %pg, <vscale x 2
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fsub z0.d, p0/m, z0.d, #0.5
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x double> undef, double 0.500000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x double> %elt, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x double> @llvm.aarch64.sve.fsub.nxv2f64(<vscale x 2 x i1> %pg,
-                                                              <vscale x 2 x double> %a,
-                                                              <vscale x 2 x double> %splat)
+                                                                   <vscale x 2 x double> %a,
+                                                                   <vscale x 2 x double> splat(double 0.500000e+00))
   ret <vscale x 2 x double> %out
 }
 
@@ -1143,12 +981,10 @@ define <vscale x 2 x double> @fsub_d_immhalf_zero(<vscale x 2 x i1> %pg, <vscale
 ; CHECK-NEXT:    movprfx z0.d, p0/z, z0.d
 ; CHECK-NEXT:    fsub z0.d, p0/m, z0.d, #0.5
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x double> undef, double 0.500000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x double> %elt, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
   %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
   %out = call <vscale x 2 x double> @llvm.aarch64.sve.fsub.nxv2f64(<vscale x 2 x i1> %pg,
-                                                              <vscale x 2 x double> %a_z,
-                                                              <vscale x 2 x double> %splat)
+                                                                   <vscale x 2 x double> %a_z,
+                                                                   <vscale x 2 x double> splat(double 0.500000e+00))
   ret <vscale x 2 x double> %out
 }
 
@@ -1157,11 +993,9 @@ define <vscale x 2 x double> @fsub_d_immone(<vscale x 2 x i1> %pg, <vscale x 2 x
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fsub z0.d, p0/m, z0.d, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x double> undef, double 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x double> %elt, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x double> @llvm.aarch64.sve.fsub.nxv2f64(<vscale x 2 x i1> %pg,
-                                                              <vscale x 2 x double> %a,
-                                                              <vscale x 2 x double> %splat)
+                                                                   <vscale x 2 x double> %a,
+                                                                   <vscale x 2 x double> splat(double 1.000000e+00))
   ret <vscale x 2 x double> %out
 }
 
@@ -1171,12 +1005,10 @@ define <vscale x 2 x double> @fsub_d_immone_zero(<vscale x 2 x i1> %pg, <vscale
 ; CHECK-NEXT:    movprfx z0.d, p0/z, z0.d
 ; CHECK-NEXT:    fsub z0.d, p0/m, z0.d, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x double> undef, double 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x double> %elt, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
   %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
   %out = call <vscale x 2 x double> @llvm.aarch64.sve.fsub.nxv2f64(<vscale x 2 x i1> %pg,
-                                                              <vscale x 2 x double> %a_z,
-                                                              <vscale x 2 x double> %splat)
+                                                                   <vscale x 2 x double> %a_z,
+                                                                   <vscale x 2 x double> splat(double 1.000000e+00))
   ret <vscale x 2 x double> %out
 }
 
@@ -1186,12 +1018,10 @@ define <vscale x 8 x half> @fsubr_h_immhalf(<vscale x 8 x i1> %pg, <vscale x 8 x
 ; CHECK-NEXT:    movprfx z0.h, p0/z, z0.h
 ; CHECK-NEXT:    fsubr z0.h, p0/m, z0.h, #0.5
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x half> undef, half 0.500000e+00, i32 0
-  %splat = shufflevector <vscale x 8 x half> %elt, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
   %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
   %out = call <vscale x 8 x half> @llvm.aarch64.sve.fsubr.nxv8f16(<vscale x 8 x i1> %pg,
-                                                             <vscale x 8 x half> %a_z,
-                                                             <vscale x 8 x half> %splat)
+                                                                  <vscale x 8 x half> %a_z,
+                                                                  <vscale x 8 x half> splat(half 0.500000e+00))
   ret <vscale x 8 x half> %out
 }
 
@@ -1201,12 +1031,10 @@ define <vscale x 8 x half> @fsubr_h_immone(<vscale x 8 x i1> %pg, <vscale x 8 x
 ; CHECK-NEXT:    movprfx z0.h, p0/z, z0.h
 ; CHECK-NEXT:    fsubr z0.h, p0/m, z0.h, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x half> undef, half 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 8 x half> %elt, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
   %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
   %out = call <vscale x 8 x half> @llvm.aarch64.sve.fsubr.nxv8f16(<vscale x 8 x i1> %pg,
-                                                             <vscale x 8 x half> %a_z,
-                                                             <vscale x 8 x half> %splat)
+                                                                  <vscale x 8 x half> %a_z,
+                                                                  <vscale x 8 x half> splat(half 1.000000e+00))
   ret <vscale x 8 x half> %out
 }
 
@@ -1216,12 +1044,10 @@ define <vscale x 4 x float> @fsubr_s_immhalf(<vscale x 4 x i1> %pg, <vscale x 4
 ; CHECK-NEXT:    movprfx z0.s, p0/z, z0.s
 ; CHECK-NEXT:    fsubr z0.s, p0/m, z0.s, #0.5
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x float> undef, float 0.500000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x float> %elt, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
   %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
   %out = call <vscale x 4 x float> @llvm.aarch64.sve.fsubr.nxv4f32(<vscale x 4 x i1> %pg,
-                                                              <vscale x 4 x float> %a_z,
-                                                              <vscale x 4 x float> %splat)
+                                                                   <vscale x 4 x float> %a_z,
+                                                                   <vscale x 4 x float> splat(float 0.500000e+00))
   ret <vscale x 4 x float> %out
 }
 
@@ -1231,12 +1057,10 @@ define <vscale x 4 x float> @fsubr_s_immone(<vscale x 4 x i1> %pg, <vscale x 4 x
 ; CHECK-NEXT:    movprfx z0.s, p0/z, z0.s
 ; CHECK-NEXT:    fsubr z0.s, p0/m, z0.s, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x float> undef, float 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 4 x float> %elt, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
   %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
   %out = call <vscale x 4 x float> @llvm.aarch64.sve.fsubr.nxv4f32(<vscale x 4 x i1> %pg,
-                                                              <vscale x 4 x float> %a_z,
-                                                              <vscale x 4 x float> %splat)
+                                                                   <vscale x 4 x float> %a_z,
+                                                                   <vscale x 4 x float> splat(float 1.000000e+00))
   ret <vscale x 4 x float> %out
 }
 
@@ -1246,12 +1070,10 @@ define <vscale x 2 x double> @fsubr_d_immhalf(<vscale x 2 x i1> %pg, <vscale x 2
 ; CHECK-NEXT:    movprfx z0.d, p0/z, z0.d
 ; CHECK-NEXT:    fsubr z0.d, p0/m, z0.d, #0.5
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x double> undef, double 0.500000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x double> %elt, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
   %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
   %out = call <vscale x 2 x double> @llvm.aarch64.sve.fsubr.nxv2f64(<vscale x 2 x i1> %pg,
-                                                               <vscale x 2 x double> %a_z,
-                                                               <vscale x 2 x double> %splat)
+                                                                    <vscale x 2 x double> %a_z,
+                                                                    <vscale x 2 x double> splat(double 0.500000e+00))
   ret <vscale x 2 x double> %out
 }
 
@@ -1261,18 +1083,13 @@ define <vscale x 2 x double> @fsubr_d_immone(<vscale x 2 x i1> %pg, <vscale x 2
 ; CHECK-NEXT:    movprfx z0.d, p0/z, z0.d
 ; CHECK-NEXT:    fsubr z0.d, p0/m, z0.d, #1.0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x double> undef, double 1.000000e+00, i32 0
-  %splat = shufflevector <vscale x 2 x double> %elt, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
   %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
   %out = call <vscale x 2 x double> @llvm.aarch64.sve.fsubr.nxv2f64(<vscale x 2 x i1> %pg,
-                                                               <vscale x 2 x double> %a_z,
-                                                               <vscale x 2 x double> %splat)
+                                                                    <vscale x 2 x double> %a_z,
+                                                                    <vscale x 2 x double> splat(double 1.000000e+00))
   ret <vscale x 2 x double> %out
 }
 
-
-;; Arithmetic intrinsic declarations
-
 declare <vscale x 8 x half> @llvm.aarch64.sve.fadd.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
 declare <vscale x 4 x float> @llvm.aarch64.sve.fadd.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
 declare <vscale x 2 x double> @llvm.aarch64.sve.fadd.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

diff  --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-index.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-index.ll
index bc94c087ef5fe7..4d4b1b67bbafcf 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-index.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-index.ll
@@ -58,11 +58,9 @@ define <vscale x 8 x i16> @index_ii_range_combine(i16 %a) {
 ; CHECK-NEXT:    index z0.h, #0, #8
 ; CHECK-NEXT:    orr z0.h, z0.h, #0x2
 ; CHECK-NEXT:    ret
-  %val = insertelement <vscale x 8 x i16> poison, i16 2, i32 0
-  %val1 = shufflevector <vscale x 8 x i16> %val, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
   %val2 = call <vscale x 8 x i16> @llvm.aarch64.sve.index.nxv8i16(i16 0, i16 2)
-  %val3 = shl <vscale x 8 x i16> %val2, %val1
-  %out = add <vscale x 8 x i16> %val3, %val1
+  %val3 = shl <vscale x 8 x i16> %val2, splat(i16 2)
+  %out = add <vscale x 8 x i16> %val3, splat(i16 2)
   ret <vscale x 8 x i16> %out
 }
 
@@ -121,10 +119,8 @@ define <vscale x 4 x i32> @index_ir_range_combine(i32 %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    index z0.s, #0, w0
 ; CHECK-NEXT:    ret
-  %val = insertelement <vscale x 4 x i32> poison, i32 2, i32 0
-  %val1 = shufflevector <vscale x 4 x i32> %val, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
   %tmp = call <vscale x 4 x i32> @llvm.aarch64.sve.index.nxv4i32(i32 2, i32 1)
-  %tmp1 = sub <vscale x 4 x i32> %tmp, %val1
+  %tmp1 = sub <vscale x 4 x i32> %tmp, splat(i32 2)
   %val2 = insertelement <vscale x 4 x i32> poison, i32 %a, i32 0
   %val3 = shufflevector <vscale x 4 x i32> %val2, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
   %out = mul <vscale x 4 x i32> %tmp1, %val3

diff  --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith-imm-zero.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith-imm-zero.ll
index 6593978b03d41e..b22ddcb0c30105 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith-imm-zero.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith-imm-zero.ll
@@ -9,9 +9,7 @@ define <vscale x 16 x i8> @asr_i8(<vscale x 16 x i8> %a, <vscale x 16 x i1> %pg)
 ; CHECK-NEXT:    asr z0.b, p0/m, z0.b, #8
 ; CHECK-NEXT:    ret
   %vsel = select <vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> zeroinitializer
-  %ele = insertelement <vscale x 16 x i8> poison, i8 8, i32 0
-  %shuffle = shufflevector <vscale x 16 x i8> %ele, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %vsel, <vscale x 16 x i8> %shuffle)
+  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %vsel, <vscale x 16 x i8> splat(i8 8))
   ret <vscale x 16 x i8> %res
 }
 
@@ -22,9 +20,7 @@ define <vscale x 8 x i16> @asr_i16(<vscale x 8 x i16> %a, <vscale x 8 x i1> %pg)
 ; CHECK-NEXT:    asr z0.h, p0/m, z0.h, #16
 ; CHECK-NEXT:    ret
   %vsel = select <vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> zeroinitializer
-  %ele = insertelement <vscale x 8 x i16> poison, i16 16, i32 0
-  %shuffle = shufflevector <vscale x 8 x i16> %ele, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %vsel, <vscale x 8 x i16> %shuffle)
+  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %vsel, <vscale x 8 x i16> splat(i16 16))
   ret <vscale x 8 x i16> %res
 }
 
@@ -35,9 +31,7 @@ define <vscale x 4 x i32> @asr_i32(<vscale x 4 x i32> %a, <vscale x 4 x i1> %pg)
 ; CHECK-NEXT:    asr z0.s, p0/m, z0.s, #32
 ; CHECK-NEXT:    ret
   %vsel = select <vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> zeroinitializer
-  %ele = insertelement <vscale x 4 x i32> poison, i32 32, i32 0
-  %shuffle = shufflevector <vscale x 4 x i32> %ele, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.asr.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %vsel, <vscale x 4 x i32> %shuffle)
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.asr.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %vsel, <vscale x 4 x i32> splat(i32 32))
   ret <vscale x 4 x i32> %res
 }
 
@@ -48,9 +42,7 @@ define <vscale x 2 x i64> @asr_i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg)
 ; CHECK-NEXT:    asr z0.d, p0/m, z0.d, #64
 ; CHECK-NEXT:    ret
   %vsel = select <vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> zeroinitializer
-  %ele = insertelement <vscale x 2 x i64> poison, i64 64, i32 0
-  %shuffle = shufflevector <vscale x 2 x i64> %ele, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.asr.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %vsel, <vscale x 2 x i64> %shuffle)
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.asr.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %vsel, <vscale x 2 x i64> splat(i64 64))
   ret <vscale x 2 x i64> %res
 }
 
@@ -62,9 +54,7 @@ define <vscale x 16 x i8> @lsl_i8(<vscale x 16 x i8> %a, <vscale x 16 x i1> %pg)
 ; CHECK-NEXT:    lsl z0.b, p0/m, z0.b, #7
 ; CHECK-NEXT:    ret
   %vsel = select <vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> zeroinitializer
-  %ele = insertelement <vscale x 16 x i8> poison, i8 7, i32 0
-  %shuffle = shufflevector <vscale x 16 x i8> %ele, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %vsel, <vscale x 16 x i8> %shuffle)
+  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %vsel, <vscale x 16 x i8> splat(i8 7))
   ret <vscale x 16 x i8> %res
 }
 
@@ -75,9 +65,7 @@ define <vscale x 8 x i16> @lsl_i16(<vscale x 8 x i16> %a, <vscale x 8 x i1> %pg)
 ; CHECK-NEXT:    lsl z0.h, p0/m, z0.h, #15
 ; CHECK-NEXT:    ret
   %vsel = select <vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> zeroinitializer
-  %ele = insertelement <vscale x 8 x i16> poison, i16 15, i32 0
-  %shuffle = shufflevector <vscale x 8 x i16> %ele, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %vsel, <vscale x 8 x i16> %shuffle)
+  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %vsel, <vscale x 8 x i16> splat(i16 15))
   ret <vscale x 8 x i16> %res
 }
 
@@ -88,9 +76,7 @@ define <vscale x 4 x i32> @lsl_i32(<vscale x 4 x i32> %a, <vscale x 4 x i1> %pg)
 ; CHECK-NEXT:    lsl z0.s, p0/m, z0.s, #31
 ; CHECK-NEXT:    ret
   %vsel = select <vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> zeroinitializer
-  %ele = insertelement <vscale x 4 x i32> poison, i32 31, i32 0
-  %shuffle = shufflevector <vscale x 4 x i32> %ele, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %vsel, <vscale x 4 x i32> %shuffle)
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %vsel, <vscale x 4 x i32> splat(i32 31))
   ret <vscale x 4 x i32> %res
 }
 
@@ -101,9 +87,7 @@ define <vscale x 2 x i64> @lsl_i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg)
 ; CHECK-NEXT:    lsl z0.d, p0/m, z0.d, #63
 ; CHECK-NEXT:    ret
   %vsel = select <vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> zeroinitializer
-  %ele = insertelement <vscale x 2 x i64> poison, i64 63, i32 0
-  %shuffle = shufflevector <vscale x 2 x i64> %ele, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.lsl.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %vsel, <vscale x 2 x i64> %shuffle)
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.lsl.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %vsel, <vscale x 2 x i64> splat(i64 63))
   ret <vscale x 2 x i64> %res
 }
 
@@ -115,9 +99,7 @@ define <vscale x 16 x i8> @lsr_i8(<vscale x 16 x i8> %a, <vscale x 16 x i1> %pg)
 ; CHECK-NEXT:    lsr z0.b, p0/m, z0.b, #8
 ; CHECK-NEXT:    ret
   %vsel = select <vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> zeroinitializer
-  %ele = insertelement <vscale x 16 x i8> poison, i8 8, i32 0
-  %shuffle = shufflevector <vscale x 16 x i8> %ele, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.lsr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %vsel, <vscale x 16 x i8> %shuffle)
+  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.lsr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %vsel, <vscale x 16 x i8> splat(i8 8))
   ret <vscale x 16 x i8> %res
 }
 
@@ -128,9 +110,7 @@ define <vscale x 8 x i16> @lsr_i16(<vscale x 8 x i16> %a, <vscale x 8 x i1> %pg)
 ; CHECK-NEXT:    lsr z0.h, p0/m, z0.h, #16
 ; CHECK-NEXT:    ret
   %vsel = select <vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> zeroinitializer
-  %ele = insertelement <vscale x 8 x i16> poison, i16 16, i32 0
-  %shuffle = shufflevector <vscale x 8 x i16> %ele, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.lsr.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %vsel, <vscale x 8 x i16> %shuffle)
+  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.lsr.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %vsel, <vscale x 8 x i16> splat(i16 16))
   ret <vscale x 8 x i16> %res
 }
 
@@ -141,9 +121,7 @@ define <vscale x 4 x i32> @lsr_i32(<vscale x 4 x i32> %a, <vscale x 4 x i1> %pg)
 ; CHECK-NEXT:    lsr z0.s, p0/m, z0.s, #32
 ; CHECK-NEXT:    ret
   %vsel = select <vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> zeroinitializer
-  %ele = insertelement <vscale x 4 x i32> poison, i32 32, i32 0
-  %shuffle = shufflevector <vscale x 4 x i32> %ele, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.lsr.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %vsel, <vscale x 4 x i32> %shuffle)
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.lsr.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %vsel, <vscale x 4 x i32> splat(i32 32))
   ret <vscale x 4 x i32> %res
 }
 
@@ -154,9 +132,7 @@ define <vscale x 2 x i64> @lsr_i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg)
 ; CHECK-NEXT:    lsr z0.d, p0/m, z0.d, #64
 ; CHECK-NEXT:    ret
   %vsel = select <vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> zeroinitializer
-  %ele = insertelement <vscale x 2 x i64> poison, i64 64, i32 0
-  %shuffle = shufflevector <vscale x 2 x i64> %ele, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.lsr.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %vsel, <vscale x 2 x i64> %shuffle)
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.lsr.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %vsel, <vscale x 2 x i64> splat(i64 64))
   ret <vscale x 2 x i64> %res
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith-imm.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith-imm.ll
index 5648e8244e6ec1..73a2292b183baa 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith-imm.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith-imm.ll
@@ -9,11 +9,9 @@ define <vscale x 16 x i8> @add_i8(<vscale x 16 x i8> %a) {
 ; CHECK-NEXT:    add z0.b, z0.b, #127 // =0x7f
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
-  %elt = insertelement <vscale x 16 x i8> undef, i8 127, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
   %out = call <vscale x 16 x i8> @llvm.aarch64.sve.add.u.nxv16i8(<vscale x 16 x i1> %pg,
                                                                  <vscale x 16 x i8> %a,
-                                                                 <vscale x 16 x i8> %splat)
+                                                                 <vscale x 16 x i8> splat(i8 127))
   ret <vscale x 16 x i8> %out
 }
 
@@ -23,11 +21,9 @@ define <vscale x 8 x i16> @add_i16(<vscale x 8 x i16> %a) {
 ; CHECK-NEXT:    add z0.h, z0.h, #127 // =0x7f
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
-  %elt = insertelement <vscale x 8 x i16> undef, i16 127, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i16> @llvm.aarch64.sve.add.u.nxv8i16(<vscale x 8 x i1> %pg,
                                                                  <vscale x 8 x i16> %a,
-                                                                 <vscale x 8 x i16> %splat)
+                                                                 <vscale x 8 x i16> splat(i16 127))
   ret <vscale x 8 x i16> %out
 }
 
@@ -38,11 +34,9 @@ define <vscale x 8 x i16> @add_i16_out_of_range(<vscale x 8 x i16> %a) {
 ; CHECK-NEXT:    add z0.h, z0.h, z1.h
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
-  %elt = insertelement <vscale x 8 x i16> undef, i16 257, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i16> @llvm.aarch64.sve.add.u.nxv8i16(<vscale x 8 x i1> %pg,
                                                                  <vscale x 8 x i16> %a,
-                                                                 <vscale x 8 x i16> %splat)
+                                                                 <vscale x 8 x i16> splat(i16 257))
   ret <vscale x 8 x i16> %out
 }
 
@@ -52,11 +46,9 @@ define <vscale x 4 x i32> @add_i32(<vscale x 4 x i32> %a) {
 ; CHECK-NEXT:    add z0.s, z0.s, #127 // =0x7f
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
-  %elt = insertelement <vscale x 4 x i32> undef, i32 127, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i32> @llvm.aarch64.sve.add.u.nxv4i32(<vscale x 4 x i1> %pg,
                                                                  <vscale x 4 x i32> %a,
-                                                                 <vscale x 4 x i32> %splat)
+                                                                 <vscale x 4 x i32> splat(i32 127))
   ret <vscale x 4 x i32> %out
 }
 
@@ -68,11 +60,9 @@ define <vscale x 4 x i32> @add_i32_out_of_range(<vscale x 4 x i32> %a) {
 ; CHECK-NEXT:    add z0.s, z0.s, z1.s
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
-  %elt = insertelement <vscale x 4 x i32> undef, i32 257, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i32> @llvm.aarch64.sve.add.u.nxv4i32(<vscale x 4 x i1> %pg,
                                                                  <vscale x 4 x i32> %a,
-                                                                 <vscale x 4 x i32> %splat)
+                                                                 <vscale x 4 x i32> splat(i32 257))
   ret <vscale x 4 x i32> %out
 }
 
@@ -82,11 +72,9 @@ define <vscale x 2 x i64> @add_i64(<vscale x 2 x i64> %a) {
 ; CHECK-NEXT:    add z0.d, z0.d, #127 // =0x7f
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
-  %elt = insertelement <vscale x 2 x i64> undef, i64 127, i64 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i64> @llvm.aarch64.sve.add.u.nxv2i64(<vscale x 2 x i1> %pg,
                                                                  <vscale x 2 x i64> %a,
-                                                                 <vscale x 2 x i64> %splat)
+                                                                 <vscale x 2 x i64> splat(i64 127))
   ret <vscale x 2 x i64> %out
 }
 
@@ -98,11 +86,9 @@ define <vscale x 2 x i64> @add_i64_out_of_range(<vscale x 2 x i64> %a) {
 ; CHECK-NEXT:    add z0.d, z0.d, z1.d
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
-  %elt = insertelement <vscale x 2 x i64> undef, i64 257, i64 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i64> @llvm.aarch64.sve.add.u.nxv2i64(<vscale x 2 x i1> %pg,
                                                                  <vscale x 2 x i64> %a,
-                                                                 <vscale x 2 x i64> %splat)
+                                                                 <vscale x 2 x i64> splat(i64 257))
   ret <vscale x 2 x i64> %out
 }
 
@@ -114,11 +100,9 @@ define <vscale x 16 x i8> @sub_i8(<vscale x 16 x i8> %a) {
 ; CHECK-NEXT:    sub z0.b, z0.b, #127 // =0x7f
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
-  %elt = insertelement <vscale x 16 x i8> undef, i8 127, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
   %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sub.u.nxv16i8(<vscale x 16 x i1> %pg,
                                                                  <vscale x 16 x i8> %a,
-                                                                 <vscale x 16 x i8> %splat)
+                                                                 <vscale x 16 x i8> splat(i8 127))
   ret <vscale x 16 x i8> %out
 }
 
@@ -128,11 +112,9 @@ define <vscale x 8 x i16> @sub_i16(<vscale x 8 x i16> %a) {
 ; CHECK-NEXT:    sub z0.h, z0.h, #127 // =0x7f
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
-  %elt = insertelement <vscale x 8 x i16> undef, i16 127, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sub.u.nxv8i16(<vscale x 8 x i1> %pg,
                                                                  <vscale x 8 x i16> %a,
-                                                                 <vscale x 8 x i16> %splat)
+                                                                 <vscale x 8 x i16> splat(i16 127))
   ret <vscale x 8 x i16> %out
 }
 
@@ -143,11 +125,9 @@ define <vscale x 8 x i16> @sub_i16_out_of_range(<vscale x 8 x i16> %a) {
 ; CHECK-NEXT:    sub z0.h, z0.h, z1.h
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
-  %elt = insertelement <vscale x 8 x i16> undef, i16 257, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sub.u.nxv8i16(<vscale x 8 x i1> %pg,
                                                                  <vscale x 8 x i16> %a,
-                                                                 <vscale x 8 x i16> %splat)
+                                                                 <vscale x 8 x i16> splat(i16 257))
   ret <vscale x 8 x i16> %out
 }
 
@@ -157,11 +137,9 @@ define <vscale x 4 x i32> @sub_i32(<vscale x 4 x i32> %a) {
 ; CHECK-NEXT:    sub z0.s, z0.s, #127 // =0x7f
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
-  %elt = insertelement <vscale x 4 x i32> undef, i32 127, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sub.u.nxv4i32(<vscale x 4 x i1> %pg,
                                                                  <vscale x 4 x i32> %a,
-                                                                 <vscale x 4 x i32> %splat)
+                                                                 <vscale x 4 x i32> splat(i32 127))
   ret <vscale x 4 x i32> %out
 }
 
@@ -173,11 +151,9 @@ define <vscale x 4 x i32> @sub_i32_out_of_range(<vscale x 4 x i32> %a) {
 ; CHECK-NEXT:    sub z0.s, z0.s, z1.s
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
-  %elt = insertelement <vscale x 4 x i32> undef, i32 257, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sub.u.nxv4i32(<vscale x 4 x i1> %pg,
                                                                  <vscale x 4 x i32> %a,
-                                                                 <vscale x 4 x i32> %splat)
+                                                                 <vscale x 4 x i32> splat(i32 257))
   ret <vscale x 4 x i32> %out
 }
 
@@ -187,11 +163,9 @@ define <vscale x 2 x i64> @sub_i64(<vscale x 2 x i64> %a) {
 ; CHECK-NEXT:    sub z0.d, z0.d, #127 // =0x7f
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
-  %elt = insertelement <vscale x 2 x i64> undef, i64 127, i64 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sub.u.nxv2i64(<vscale x 2 x i1> %pg,
                                                                  <vscale x 2 x i64> %a,
-                                                                 <vscale x 2 x i64> %splat)
+                                                                 <vscale x 2 x i64> splat(i64 127))
   ret <vscale x 2 x i64> %out
 }
 
@@ -203,11 +177,9 @@ define <vscale x 2 x i64> @sub_i64_out_of_range(<vscale x 2 x i64> %a) {
 ; CHECK-NEXT:    sub z0.d, z0.d, z1.d
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
-  %elt = insertelement <vscale x 2 x i64> undef, i64 257, i64 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sub.u.nxv2i64(<vscale x 2 x i1> %pg,
                                                                  <vscale x 2 x i64> %a,
-                                                                 <vscale x 2 x i64> %splat)
+                                                                 <vscale x 2 x i64> splat(i64 257))
   ret <vscale x 2 x i64> %out
 }
 
@@ -269,11 +241,9 @@ define <vscale x 16 x i8> @subr_i8(<vscale x 16 x i8> %a) {
 ; CHECK-NEXT:    subr z0.b, z0.b, #127 // =0x7f
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
-  %elt = insertelement <vscale x 16 x i8> undef, i8 127, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
   %out = call <vscale x 16 x i8> @llvm.aarch64.sve.subr.nxv16i8(<vscale x 16 x i1> %pg,
                                                                 <vscale x 16 x i8> %a,
-                                                                <vscale x 16 x i8> %splat)
+                                                                <vscale x 16 x i8> splat(i8 127))
   ret <vscale x 16 x i8> %out
 }
 
@@ -283,11 +253,9 @@ define <vscale x 8 x i16> @subr_i16(<vscale x 8 x i16> %a) {
 ; CHECK-NEXT:    subr z0.h, z0.h, #127 // =0x7f
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
-  %elt = insertelement <vscale x 8 x i16> undef, i16 127, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i16> @llvm.aarch64.sve.subr.nxv8i16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x i16> %a,
-                                                                <vscale x 8 x i16> %splat)
+                                                                <vscale x 8 x i16> splat(i16 127))
   ret <vscale x 8 x i16> %out
 }
 
@@ -298,11 +266,9 @@ define <vscale x 8 x i16> @subr_i16_out_of_range(<vscale x 8 x i16> %a) {
 ; CHECK-NEXT:    sub z0.h, z1.h, z0.h
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
-  %elt = insertelement <vscale x 8 x i16> undef, i16 257, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i16> @llvm.aarch64.sve.subr.nxv8i16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x i16> %a,
-                                                                <vscale x 8 x i16> %splat)
+                                                                <vscale x 8 x i16> splat(i16 257))
   ret <vscale x 8 x i16> %out
 }
 
@@ -312,11 +278,9 @@ define <vscale x 4 x i32> @subr_i32(<vscale x 4 x i32> %a) {
 ; CHECK-NEXT:    subr z0.s, z0.s, #127 // =0x7f
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
-  %elt = insertelement <vscale x 4 x i32> undef, i32 127, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i32> @llvm.aarch64.sve.subr.nxv4i32(<vscale x 4 x i1> %pg,
                                                                 <vscale x 4 x i32> %a,
-                                                                <vscale x 4 x i32> %splat)
+                                                                <vscale x 4 x i32> splat(i32 127))
   ret <vscale x 4 x i32> %out
 }
 
@@ -328,11 +292,9 @@ define <vscale x 4 x i32> @subr_i32_out_of_range(<vscale x 4 x i32> %a) {
 ; CHECK-NEXT:    sub z0.s, z1.s, z0.s
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
-  %elt = insertelement <vscale x 4 x i32> undef, i32 257, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i32> @llvm.aarch64.sve.subr.nxv4i32(<vscale x 4 x i1> %pg,
                                                                 <vscale x 4 x i32> %a,
-                                                                <vscale x 4 x i32> %splat)
+                                                                <vscale x 4 x i32> splat(i32 257))
   ret <vscale x 4 x i32> %out
 }
 
@@ -342,11 +304,9 @@ define <vscale x 2 x i64> @subr_i64(<vscale x 2 x i64> %a) {
 ; CHECK-NEXT:    subr z0.d, z0.d, #127 // =0x7f
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
-  %elt = insertelement <vscale x 2 x i64> undef, i64 127, i64 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i64> @llvm.aarch64.sve.subr.nxv2i64(<vscale x 2 x i1> %pg,
                                                                 <vscale x 2 x i64> %a,
-                                                                <vscale x 2 x i64> %splat)
+                                                                <vscale x 2 x i64> splat(i64 127))
   ret <vscale x 2 x i64> %out
 }
 
@@ -358,11 +318,9 @@ define <vscale x 2 x i64> @subr_i64_out_of_range(<vscale x 2 x i64> %a) {
 ; CHECK-NEXT:    sub z0.d, z1.d, z0.d
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
-  %elt = insertelement <vscale x 2 x i64> undef, i64 257, i64 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i64> @llvm.aarch64.sve.subr.nxv2i64(<vscale x 2 x i1> %pg,
                                                                 <vscale x 2 x i64> %a,
-                                                                <vscale x 2 x i64> %splat)
+                                                                <vscale x 2 x i64> splat(i64 257))
   ret <vscale x 2 x i64> %out
 }
 
@@ -424,11 +382,9 @@ define <vscale x 16 x i8> @smax_i8(<vscale x 16 x i8> %a) {
 ; CHECK-NEXT:    smax z0.b, z0.b, #-128
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
-  %elt = insertelement <vscale x 16 x i8> undef, i8 -128, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
   %out = call <vscale x 16 x i8> @llvm.aarch64.sve.smax.u.nxv16i8(<vscale x 16 x i1> %pg,
                                                                   <vscale x 16 x i8> %a,
-                                                                  <vscale x 16 x i8> %splat)
+                                                                  <vscale x 16 x i8> splat(i8 -128))
   ret <vscale x 16 x i8> %out
 }
 
@@ -438,11 +394,9 @@ define <vscale x 8 x i16> @smax_i16(<vscale x 8 x i16> %a) {
 ; CHECK-NEXT:    smax z0.h, z0.h, #127
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
-  %elt = insertelement <vscale x 8 x i16> undef, i16 127, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i16> @llvm.aarch64.sve.smax.u.nxv8i16(<vscale x 8 x i1> %pg,
                                                                   <vscale x 8 x i16> %a,
-                                                                  <vscale x 8 x i16> %splat)
+                                                                  <vscale x 8 x i16> splat(i16 127))
   ret <vscale x 8 x i16> %out
 }
 
@@ -455,11 +409,9 @@ define <vscale x 8 x i16> @smax_i16_out_of_range(<vscale x 8 x i16> %a) {
 ; CHECK-NEXT:    smax z0.h, p0/m, z0.h, z1.h
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
-  %elt = insertelement <vscale x 8 x i16> undef, i16 129, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i16> @llvm.aarch64.sve.smax.u.nxv8i16(<vscale x 8 x i1> %pg,
                                                                   <vscale x 8 x i16> %a,
-                                                                  <vscale x 8 x i16> %splat)
+                                                                  <vscale x 8 x i16> splat(i16 129))
   ret <vscale x 8 x i16> %out
 }
 
@@ -469,11 +421,9 @@ define <vscale x 4 x i32> @smax_i32(<vscale x 4 x i32> %a) {
 ; CHECK-NEXT:    smax z0.s, z0.s, #-128
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
-  %elt = insertelement <vscale x 4 x i32> undef, i32 -128, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i32> @llvm.aarch64.sve.smax.u.nxv4i32(<vscale x 4 x i1> %pg,
                                                                   <vscale x 4 x i32> %a,
-                                                                  <vscale x 4 x i32> %splat)
+                                                                  <vscale x 4 x i32> splat(i32 -128))
   ret <vscale x 4 x i32> %out
 }
 
@@ -485,11 +435,9 @@ define <vscale x 4 x i32> @smax_i32_out_of_range(<vscale x 4 x i32> %a) {
 ; CHECK-NEXT:    smax z0.s, p0/m, z0.s, z1.s
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
-  %elt = insertelement <vscale x 4 x i32> undef, i32 -129, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i32> @llvm.aarch64.sve.smax.u.nxv4i32(<vscale x 4 x i1> %pg,
                                                                   <vscale x 4 x i32> %a,
-                                                                  <vscale x 4 x i32> %splat)
+                                                                  <vscale x 4 x i32> splat(i32 -129))
   ret <vscale x 4 x i32> %out
 }
 
@@ -499,11 +447,9 @@ define <vscale x 2 x i64> @smax_i64(<vscale x 2 x i64> %a) {
 ; CHECK-NEXT:    smax z0.d, z0.d, #127
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
-  %elt = insertelement <vscale x 2 x i64> undef, i64 127, i64 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i64> @llvm.aarch64.sve.smax.u.nxv2i64(<vscale x 2 x i1> %pg,
                                                                   <vscale x 2 x i64> %a,
-                                                                  <vscale x 2 x i64> %splat)
+                                                                  <vscale x 2 x i64> splat(i64 127))
   ret <vscale x 2 x i64> %out
 }
 
@@ -515,11 +461,9 @@ define <vscale x 2 x i64> @smax_i64_out_of_range(<vscale x 2 x i64> %a) {
 ; CHECK-NEXT:    smax z0.d, p0/m, z0.d, z1.d
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
-  %elt = insertelement <vscale x 2 x i64> undef, i64 65535, i64 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i64> @llvm.aarch64.sve.smax.u.nxv2i64(<vscale x 2 x i1> %pg,
                                                                   <vscale x 2 x i64> %a,
-                                                                  <vscale x 2 x i64> %splat)
+                                                                  <vscale x 2 x i64> splat(i64 65535))
   ret <vscale x 2 x i64> %out
 }
 
@@ -581,11 +525,9 @@ define <vscale x 16 x i8> @smin_i8(<vscale x 16 x i8> %a) {
 ; CHECK-NEXT:    smin z0.b, z0.b, #127
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
-  %elt = insertelement <vscale x 16 x i8> undef, i8 127, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
   %out = call <vscale x 16 x i8> @llvm.aarch64.sve.smin.u.nxv16i8(<vscale x 16 x i1> %pg,
                                                                   <vscale x 16 x i8> %a,
-                                                                  <vscale x 16 x i8> %splat)
+                                                                  <vscale x 16 x i8> splat(i8 127))
   ret <vscale x 16 x i8> %out
 }
 
@@ -595,11 +537,9 @@ define <vscale x 8 x i16> @smin_i16(<vscale x 8 x i16> %a) {
 ; CHECK-NEXT:    smin z0.h, z0.h, #-128
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
-  %elt = insertelement <vscale x 8 x i16> undef, i16 -128, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i16> @llvm.aarch64.sve.smin.u.nxv8i16(<vscale x 8 x i1> %pg,
                                                                   <vscale x 8 x i16> %a,
-                                                                  <vscale x 8 x i16> %splat)
+                                                                  <vscale x 8 x i16> splat(i16 -128))
   ret <vscale x 8 x i16> %out
 }
 
@@ -611,11 +551,9 @@ define <vscale x 8 x i16> @smin_i16_out_of_range(<vscale x 8 x i16> %a) {
 ; CHECK-NEXT:    smin z0.h, p0/m, z0.h, z1.h
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
-  %elt = insertelement <vscale x 8 x i16> undef, i16 -129, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i16> @llvm.aarch64.sve.smin.u.nxv8i16(<vscale x 8 x i1> %pg,
                                                                   <vscale x 8 x i16> %a,
-                                                                  <vscale x 8 x i16> %splat)
+                                                                  <vscale x 8 x i16> splat(i16 -129))
   ret <vscale x 8 x i16> %out
 }
 
@@ -625,11 +563,9 @@ define <vscale x 4 x i32> @smin_i32(<vscale x 4 x i32> %a) {
 ; CHECK-NEXT:    smin z0.s, z0.s, #127
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
-  %elt = insertelement <vscale x 4 x i32> undef, i32 127, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i32> @llvm.aarch64.sve.smin.u.nxv4i32(<vscale x 4 x i1> %pg,
                                                                   <vscale x 4 x i32> %a,
-                                                                  <vscale x 4 x i32> %splat)
+                                                                  <vscale x 4 x i32> splat(i32 127))
   ret <vscale x 4 x i32> %out
 }
 
@@ -642,11 +578,9 @@ define <vscale x 4 x i32> @smin_i32_out_of_range(<vscale x 4 x i32> %a) {
 ; CHECK-NEXT:    smin z0.s, p0/m, z0.s, z1.s
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
-  %elt = insertelement <vscale x 4 x i32> undef, i32 257, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i32> @llvm.aarch64.sve.smin.u.nxv4i32(<vscale x 4 x i1> %pg,
                                                                   <vscale x 4 x i32> %a,
-                                                                  <vscale x 4 x i32> %splat)
+                                                                  <vscale x 4 x i32> splat(i32 257))
   ret <vscale x 4 x i32> %out
 }
 
@@ -657,11 +591,9 @@ define <vscale x 2 x i64> @smin_i64(<vscale x 2 x i64> %a) {
 ; CHECK-NEXT:    smin z0.d, z0.d, #-128
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
-  %elt = insertelement <vscale x 2 x i64> undef, i64 -128, i64 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i64> @llvm.aarch64.sve.smin.u.nxv2i64(<vscale x 2 x i1> %pg,
                                                                   <vscale x 2 x i64> %a,
-                                                                  <vscale x 2 x i64> %splat)
+                                                                  <vscale x 2 x i64> splat(i64 -128))
   ret <vscale x 2 x i64> %out
 }
 
@@ -673,11 +605,9 @@ define <vscale x 2 x i64> @smin_i64_out_of_range(<vscale x 2 x i64> %a) {
 ; CHECK-NEXT:    smin z0.d, p0/m, z0.d, z1.d
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
-  %elt = insertelement <vscale x 2 x i64> undef, i64 -256, i64 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i64> @llvm.aarch64.sve.smin.u.nxv2i64(<vscale x 2 x i1> %pg,
                                                                   <vscale x 2 x i64> %a,
-                                                                  <vscale x 2 x i64> %splat)
+                                                                  <vscale x 2 x i64> splat(i64 -256))
   ret <vscale x 2 x i64> %out
 }
 
@@ -739,11 +669,9 @@ define <vscale x 16 x i8> @umax_i8(<vscale x 16 x i8> %a) {
 ; CHECK-NEXT:    umax z0.b, z0.b, #0
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
-  %elt = insertelement <vscale x 16 x i8> undef, i8 0, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
   %out = call <vscale x 16 x i8> @llvm.aarch64.sve.umax.u.nxv16i8(<vscale x 16 x i1> %pg,
                                                                   <vscale x 16 x i8> %a,
-                                                                  <vscale x 16 x i8> %splat)
+                                                                  <vscale x 16 x i8> zeroinitializer)
   ret <vscale x 16 x i8> %out
 }
 
@@ -753,11 +681,9 @@ define <vscale x 8 x i16> @umax_i16(<vscale x 8 x i16> %a) {
 ; CHECK-NEXT:    umax z0.h, z0.h, #255
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
-  %elt = insertelement <vscale x 8 x i16> undef, i16 255, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i16> @llvm.aarch64.sve.umax.u.nxv8i16(<vscale x 8 x i1> %pg,
                                                                   <vscale x 8 x i16> %a,
-                                                                  <vscale x 8 x i16> %splat)
+                                                                  <vscale x 8 x i16> splat(i16 255))
   ret <vscale x 8 x i16> %out
 }
 
@@ -769,11 +695,9 @@ define <vscale x 8 x i16> @umax_i16_out_of_range(<vscale x 8 x i16> %a) {
 ; CHECK-NEXT:    umax z0.h, p0/m, z0.h, z1.h
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
-  %elt = insertelement <vscale x 8 x i16> undef, i16 257, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i16> @llvm.aarch64.sve.umax.u.nxv8i16(<vscale x 8 x i1> %pg,
                                                                   <vscale x 8 x i16> %a,
-                                                                  <vscale x 8 x i16> %splat)
+                                                                  <vscale x 8 x i16> splat(i16 257))
   ret <vscale x 8 x i16> %out
 }
 
@@ -783,11 +707,9 @@ define <vscale x 4 x i32> @umax_i32(<vscale x 4 x i32> %a) {
 ; CHECK-NEXT:    umax z0.s, z0.s, #0
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
-  %elt = insertelement <vscale x 4 x i32> undef, i32 0, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i32> @llvm.aarch64.sve.umax.u.nxv4i32(<vscale x 4 x i1> %pg,
                                                                   <vscale x 4 x i32> %a,
-                                                                  <vscale x 4 x i32> %splat)
+                                                                  <vscale x 4 x i32> zeroinitializer)
   ret <vscale x 4 x i32> %out
 }
 
@@ -800,11 +722,9 @@ define <vscale x 4 x i32> @umax_i32_out_of_range(<vscale x 4 x i32> %a) {
 ; CHECK-NEXT:    umax z0.s, p0/m, z0.s, z1.s
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
-  %elt = insertelement <vscale x 4 x i32> undef, i32 257, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i32> @llvm.aarch64.sve.umax.u.nxv4i32(<vscale x 4 x i1> %pg,
                                                                   <vscale x 4 x i32> %a,
-                                                                  <vscale x 4 x i32> %splat)
+                                                                  <vscale x 4 x i32> splat(i32 257))
   ret <vscale x 4 x i32> %out
 }
 
@@ -814,11 +734,9 @@ define <vscale x 2 x i64> @umax_i64(<vscale x 2 x i64> %a) {
 ; CHECK-NEXT:    umax z0.d, z0.d, #255
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
-  %elt = insertelement <vscale x 2 x i64> undef, i64 255, i64 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i64> @llvm.aarch64.sve.umax.u.nxv2i64(<vscale x 2 x i1> %pg,
                                                                   <vscale x 2 x i64> %a,
-                                                                  <vscale x 2 x i64> %splat)
+                                                                  <vscale x 2 x i64> splat(i64 255))
   ret <vscale x 2 x i64> %out
 }
 
@@ -830,11 +748,9 @@ define <vscale x 2 x i64> @umax_i64_out_of_range(<vscale x 2 x i64> %a) {
 ; CHECK-NEXT:    umax z0.d, p0/m, z0.d, z1.d
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
-  %elt = insertelement <vscale x 2 x i64> undef, i64 65535, i64 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i64> @llvm.aarch64.sve.umax.u.nxv2i64(<vscale x 2 x i1> %pg,
                                                                   <vscale x 2 x i64> %a,
-                                                                  <vscale x 2 x i64> %splat)
+                                                                  <vscale x 2 x i64> splat(i64 65535))
   ret <vscale x 2 x i64> %out
 }
 
@@ -896,11 +812,9 @@ define <vscale x 16 x i8> @umin_i8(<vscale x 16 x i8> %a) {
 ; CHECK-NEXT:    umin z0.b, z0.b, #255
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
-  %elt = insertelement <vscale x 16 x i8> undef, i8 255, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
   %out = call <vscale x 16 x i8> @llvm.aarch64.sve.umin.u.nxv16i8(<vscale x 16 x i1> %pg,
                                                                   <vscale x 16 x i8> %a,
-                                                                  <vscale x 16 x i8> %splat)
+                                                                  <vscale x 16 x i8> splat(i8 255))
   ret <vscale x 16 x i8> %out
 }
 
@@ -910,11 +824,9 @@ define <vscale x 8 x i16> @umin_i16(<vscale x 8 x i16> %a) {
 ; CHECK-NEXT:    umin z0.h, z0.h, #0
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
-  %elt = insertelement <vscale x 8 x i16> undef, i16 0, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i16> @llvm.aarch64.sve.umin.u.nxv8i16(<vscale x 8 x i1> %pg,
                                                                   <vscale x 8 x i16> %a,
-                                                                  <vscale x 8 x i16> %splat)
+                                                                  <vscale x 8 x i16> zeroinitializer)
   ret <vscale x 8 x i16> %out
 }
 
@@ -926,11 +838,9 @@ define <vscale x 8 x i16> @umin_i16_out_of_range(<vscale x 8 x i16> %a) {
 ; CHECK-NEXT:    umin z0.h, p0/m, z0.h, z1.h
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
-  %elt = insertelement <vscale x 8 x i16> undef, i16 257, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i16> @llvm.aarch64.sve.umin.u.nxv8i16(<vscale x 8 x i1> %pg,
                                                                   <vscale x 8 x i16> %a,
-                                                                  <vscale x 8 x i16> %splat)
+                                                                  <vscale x 8 x i16> splat(i16 257))
   ret <vscale x 8 x i16> %out
 }
 
@@ -940,11 +850,9 @@ define <vscale x 4 x i32> @umin_i32(<vscale x 4 x i32> %a) {
 ; CHECK-NEXT:    umin z0.s, z0.s, #255
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
-  %elt = insertelement <vscale x 4 x i32> undef, i32 255, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i32> @llvm.aarch64.sve.umin.u.nxv4i32(<vscale x 4 x i1> %pg,
                                                                   <vscale x 4 x i32> %a,
-                                                                  <vscale x 4 x i32> %splat)
+                                                                  <vscale x 4 x i32> splat(i32 255))
   ret <vscale x 4 x i32> %out
 }
 
@@ -957,11 +865,9 @@ define <vscale x 4 x i32> @umin_i32_out_of_range(<vscale x 4 x i32> %a) {
 ; CHECK-NEXT:    umin z0.s, p0/m, z0.s, z1.s
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
-  %elt = insertelement <vscale x 4 x i32> undef, i32 257, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i32> @llvm.aarch64.sve.umin.u.nxv4i32(<vscale x 4 x i1> %pg,
                                                                   <vscale x 4 x i32> %a,
-                                                                  <vscale x 4 x i32> %splat)
+                                                                  <vscale x 4 x i32> splat(i32 257))
   ret <vscale x 4 x i32> %out
 }
 
@@ -971,11 +877,9 @@ define <vscale x 2 x i64> @umin_i64(<vscale x 2 x i64> %a) {
 ; CHECK-NEXT:    umin z0.d, z0.d, #0
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
-  %elt = insertelement <vscale x 2 x i64> undef, i64 0, i64 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i64> @llvm.aarch64.sve.umin.u.nxv2i64(<vscale x 2 x i1> %pg,
                                                                   <vscale x 2 x i64> %a,
-                                                                  <vscale x 2 x i64> %splat)
+                                                                  <vscale x 2 x i64> zeroinitializer)
   ret <vscale x 2 x i64> %out
 }
 
@@ -987,11 +891,9 @@ define <vscale x 2 x i64> @umin_i64_out_of_range(<vscale x 2 x i64> %a) {
 ; CHECK-NEXT:    umin z0.d, p0/m, z0.d, z1.d
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
-  %elt = insertelement <vscale x 2 x i64> undef, i64 65535, i64 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i64> @llvm.aarch64.sve.umin.u.nxv2i64(<vscale x 2 x i1> %pg,
                                                                   <vscale x 2 x i64> %a,
-                                                                  <vscale x 2 x i64> %splat)
+                                                                  <vscale x 2 x i64> splat(i64 65535))
   ret <vscale x 2 x i64> %out
 }
 
@@ -1052,10 +954,8 @@ define <vscale x 16 x i8> @sqadd_b_lowimm(<vscale x 16 x i8> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sqadd z0.b, z0.b, #27 // =0x1b
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 16 x i8> undef, i8 27, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
   %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sqadd.x.nxv16i8(<vscale x 16 x i8> %a,
-                                                                   <vscale x 16 x i8> %splat)
+                                                                   <vscale x 16 x i8> splat(i8 27))
   ret <vscale x 16 x i8> %out
 }
 
@@ -1065,10 +965,8 @@ define <vscale x 16 x i8> @sqadd_b_negimm(<vscale x 16 x i8> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sqsub z0.b, z0.b, #128 // =0x80
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 16 x i8> undef, i8 -128, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
   %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sqadd.x.nxv16i8(<vscale x 16 x i8> %a,
-                                                                   <vscale x 16 x i8> %splat)
+                                                                   <vscale x 16 x i8> splat(i8 -128))
   ret <vscale x 16 x i8> %out
 }
 
@@ -1077,10 +975,8 @@ define <vscale x 8 x i16> @sqadd_h_lowimm(<vscale x 8 x i16> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sqadd z0.h, z0.h, #43 // =0x2b
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 43, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqadd.x.nxv8i16(<vscale x 8 x i16> %a,
-                                                                   <vscale x 8 x i16> %splat)
+                                                                   <vscale x 8 x i16> splat(i16 43))
   ret <vscale x 8 x i16> %out
 }
 
@@ -1089,10 +985,8 @@ define <vscale x 8 x i16> @sqadd_h_highimm(<vscale x 8 x i16> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sqadd z0.h, z0.h, #2048 // =0x800
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 2048, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqadd.x.nxv8i16(<vscale x 8 x i16> %a,
-                                                                   <vscale x 8 x i16> %splat)
+                                                                   <vscale x 8 x i16> splat(i16 2048))
   ret <vscale x 8 x i16> %out
 }
 
@@ -1102,10 +996,8 @@ define <vscale x 8 x i16> @sqadd_h_negimm(<vscale x 8 x i16> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sqsub z0.h, z0.h, #1 // =0x1
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 -1, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqadd.x.nxv8i16(<vscale x 8 x i16> %a,
-                                                                   <vscale x 8 x i16> %splat)
+                                                                   <vscale x 8 x i16> splat(i16 -1))
   ret <vscale x 8 x i16> %out
 }
 
@@ -1114,10 +1006,8 @@ define <vscale x 4 x i32> @sqadd_s_lowimm(<vscale x 4 x i32> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sqadd z0.s, z0.s, #1 // =0x1
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 1, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqadd.x.nxv4i32(<vscale x 4 x i32> %a,
-                                                                   <vscale x 4 x i32> %splat)
+                                                                   <vscale x 4 x i32> splat(i32 1))
   ret <vscale x 4 x i32> %out
 }
 
@@ -1126,10 +1016,8 @@ define <vscale x 4 x i32> @sqadd_s_highimm(<vscale x 4 x i32> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sqadd z0.s, z0.s, #8192 // =0x2000
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 8192, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqadd.x.nxv4i32(<vscale x 4 x i32> %a,
-                                                                   <vscale x 4 x i32> %splat)
+                                                                   <vscale x 4 x i32> splat(i32 8192))
   ret <vscale x 4 x i32> %out
 }
 
@@ -1139,10 +1027,8 @@ define <vscale x 4 x i32> @sqadd_s_negimm(<vscale x 4 x i32> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sqsub z0.s, z0.s, #65280 // =0xff00
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 -65280, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqadd.x.nxv4i32(<vscale x 4 x i32> %a,
-                                                                   <vscale x 4 x i32> %splat)
+                                                                   <vscale x 4 x i32> splat(i32 -65280))
   ret <vscale x 4 x i32> %out
 }
 
@@ -1151,10 +1037,8 @@ define <vscale x 2 x i64> @sqadd_d_lowimm(<vscale x 2 x i64> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sqadd z0.d, z0.d, #255 // =0xff
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 255, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sqadd.x.nxv2i64(<vscale x 2 x i64> %a,
-                                                                   <vscale x 2 x i64> %splat)
+                                                                   <vscale x 2 x i64> splat(i64 255))
   ret <vscale x 2 x i64> %out
 }
 
@@ -1163,10 +1047,8 @@ define <vscale x 2 x i64> @sqadd_d_highimm(<vscale x 2 x i64> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sqadd z0.d, z0.d, #65280 // =0xff00
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 65280, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sqadd.x.nxv2i64(<vscale x 2 x i64> %a,
-                                                                   <vscale x 2 x i64> %splat)
+                                                                   <vscale x 2 x i64> splat(i64 65280))
   ret <vscale x 2 x i64> %out
 }
 
@@ -1176,10 +1058,8 @@ define <vscale x 2 x i64> @sqadd_d_negimm(<vscale x 2 x i64> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sqsub z0.d, z0.d, #3840 // =0xf00
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 -3840, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sqadd.x.nxv2i64(<vscale x 2 x i64> %a,
-                                                                   <vscale x 2 x i64> %splat)
+                                                                   <vscale x 2 x i64> splat(i64 -3840))
   ret <vscale x 2 x i64> %out
 }
 
@@ -1190,10 +1070,8 @@ define <vscale x 16 x i8> @sqsub_b_lowimm(<vscale x 16 x i8> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sqsub z0.b, z0.b, #27 // =0x1b
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 16 x i8> undef, i8 27, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
   %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sqsub.x.nxv16i8(<vscale x 16 x i8> %a,
-                                                                   <vscale x 16 x i8> %splat)
+                                                                   <vscale x 16 x i8> splat(i8 27))
   ret <vscale x 16 x i8> %out
 }
 
@@ -1203,10 +1081,8 @@ define <vscale x 16 x i8> @sqsub_b_negimm(<vscale x 16 x i8> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sqadd z0.b, z0.b, #1 // =0x1
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 16 x i8> undef, i8 -1, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
   %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sqsub.x.nxv16i8(<vscale x 16 x i8> %a,
-                                                                   <vscale x 16 x i8> %splat)
+                                                                   <vscale x 16 x i8> splat(i8 -1))
   ret <vscale x 16 x i8> %out
 }
 
@@ -1215,10 +1091,8 @@ define <vscale x 8 x i16> @sqsub_h_lowimm(<vscale x 8 x i16> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sqsub z0.h, z0.h, #43 // =0x2b
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 43, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.x.nxv8i16(<vscale x 8 x i16> %a,
-                                                                   <vscale x 8 x i16> %splat)
+                                                                   <vscale x 8 x i16> splat(i16 43))
   ret <vscale x 8 x i16> %out
 }
 
@@ -1227,10 +1101,8 @@ define <vscale x 8 x i16> @sqsub_h_highimm(<vscale x 8 x i16> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sqsub z0.h, z0.h, #2048 // =0x800
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 2048, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.x.nxv8i16(<vscale x 8 x i16> %a,
-                                                                   <vscale x 8 x i16> %splat)
+                                                                   <vscale x 8 x i16> splat(i16 2048))
   ret <vscale x 8 x i16> %out
 }
 
@@ -1240,10 +1112,8 @@ define <vscale x 8 x i16> @sqsub_h_negimm(<vscale x 8 x i16> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sqadd z0.h, z0.h, #128 // =0x80
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 -128, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.x.nxv8i16(<vscale x 8 x i16> %a,
-                                                                   <vscale x 8 x i16> %splat)
+                                                                   <vscale x 8 x i16> splat(i16 -128))
   ret <vscale x 8 x i16> %out
 }
 
@@ -1252,10 +1122,8 @@ define <vscale x 4 x i32> @sqsub_s_lowimm(<vscale x 4 x i32> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sqsub z0.s, z0.s, #1 // =0x1
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 1, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.x.nxv4i32(<vscale x 4 x i32> %a,
-                                                                   <vscale x 4 x i32> %splat)
+                                                                   <vscale x 4 x i32> splat(i32 1))
   ret <vscale x 4 x i32> %out
 }
 
@@ -1264,10 +1132,8 @@ define <vscale x 4 x i32> @sqsub_s_highimm(<vscale x 4 x i32> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sqsub z0.s, z0.s, #8192 // =0x2000
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 8192, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.x.nxv4i32(<vscale x 4 x i32> %a,
-                                                                   <vscale x 4 x i32> %splat)
+                                                                   <vscale x 4 x i32> splat(i32 8192))
   ret <vscale x 4 x i32> %out
 }
 
@@ -1277,10 +1143,8 @@ define <vscale x 4 x i32> @sqsub_s_negimm(<vscale x 4 x i32> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sqadd z0.s, z0.s, #32768 // =0x8000
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 -32768, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.x.nxv4i32(<vscale x 4 x i32> %a,
-                                                                   <vscale x 4 x i32> %splat)
+                                                                   <vscale x 4 x i32> splat(i32 -32768))
   ret <vscale x 4 x i32> %out
 }
 
@@ -1289,10 +1153,8 @@ define <vscale x 2 x i64> @sqsub_d_lowimm(<vscale x 2 x i64> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sqsub z0.d, z0.d, #255 // =0xff
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 255, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.x.nxv2i64(<vscale x 2 x i64> %a,
-                                                                   <vscale x 2 x i64> %splat)
+                                                                   <vscale x 2 x i64> splat(i64 255))
   ret <vscale x 2 x i64> %out
 }
 
@@ -1301,10 +1163,8 @@ define <vscale x 2 x i64> @sqsub_d_highimm(<vscale x 2 x i64> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sqsub z0.d, z0.d, #65280 // =0xff00
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 65280, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.x.nxv2i64(<vscale x 2 x i64> %a,
-                                                                   <vscale x 2 x i64> %splat)
+                                                                   <vscale x 2 x i64> splat(i64 65280))
   ret <vscale x 2 x i64> %out
 }
 
@@ -1314,10 +1174,8 @@ define <vscale x 2 x i64> @sqsub_d_negimm(<vscale x 2 x i64> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sqadd z0.d, z0.d, #57344 // =0xe000
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 -57344, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.x.nxv2i64(<vscale x 2 x i64> %a,
-                                                                   <vscale x 2 x i64> %splat)
+                                                                   <vscale x 2 x i64> splat(i64 -57344))
   ret <vscale x 2 x i64> %out
 }
 
@@ -1328,10 +1186,8 @@ define <vscale x 16 x i8> @uqadd_b_lowimm(<vscale x 16 x i8> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uqadd z0.b, z0.b, #27 // =0x1b
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 16 x i8> undef, i8 27, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
   %out = call <vscale x 16 x i8> @llvm.aarch64.sve.uqadd.x.nxv16i8(<vscale x 16 x i8> %a,
-                                                                   <vscale x 16 x i8> %splat)
+                                                                   <vscale x 16 x i8> splat(i8 27))
   ret <vscale x 16 x i8> %out
 }
 
@@ -1340,10 +1196,8 @@ define <vscale x 8 x i16> @uqadd_h_lowimm(<vscale x 8 x i16> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uqadd z0.h, z0.h, #43 // =0x2b
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 43, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqadd.x.nxv8i16(<vscale x 8 x i16> %a,
-                                                                   <vscale x 8 x i16> %splat)
+                                                                   <vscale x 8 x i16> splat(i16 43))
   ret <vscale x 8 x i16> %out
 }
 
@@ -1352,10 +1206,8 @@ define <vscale x 8 x i16> @uqadd_h_highimm(<vscale x 8 x i16> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uqadd z0.h, z0.h, #2048 // =0x800
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 2048, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqadd.x.nxv8i16(<vscale x 8 x i16> %a,
-                                                                   <vscale x 8 x i16> %splat)
+                                                                   <vscale x 8 x i16> splat(i16 2048))
   ret <vscale x 8 x i16> %out
 }
 
@@ -1364,10 +1216,8 @@ define <vscale x 4 x i32> @uqadd_s_lowimm(<vscale x 4 x i32> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uqadd z0.s, z0.s, #1 // =0x1
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 1, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqadd.x.nxv4i32(<vscale x 4 x i32> %a,
-                                                                   <vscale x 4 x i32> %splat)
+                                                                   <vscale x 4 x i32> splat(i32 1))
   ret <vscale x 4 x i32> %out
 }
 
@@ -1376,10 +1226,8 @@ define <vscale x 4 x i32> @uqadd_s_highimm(<vscale x 4 x i32> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uqadd z0.s, z0.s, #8192 // =0x2000
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 8192, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqadd.x.nxv4i32(<vscale x 4 x i32> %a,
-                                                                   <vscale x 4 x i32> %splat)
+                                                                   <vscale x 4 x i32> splat(i32 8192))
   ret <vscale x 4 x i32> %out
 }
 
@@ -1388,10 +1236,8 @@ define <vscale x 2 x i64> @uqadd_d_lowimm(<vscale x 2 x i64> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uqadd z0.d, z0.d, #255 // =0xff
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 255, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uqadd.x.nxv2i64(<vscale x 2 x i64> %a,
-                                                                   <vscale x 2 x i64> %splat)
+                                                                   <vscale x 2 x i64> splat(i64 255))
   ret <vscale x 2 x i64> %out
 }
 
@@ -1400,10 +1246,8 @@ define <vscale x 2 x i64> @uqadd_d_highimm(<vscale x 2 x i64> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uqadd z0.d, z0.d, #65280 // =0xff00
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 65280, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uqadd.x.nxv2i64(<vscale x 2 x i64> %a,
-                                                                   <vscale x 2 x i64> %splat)
+                                                                   <vscale x 2 x i64> splat(i64 65280))
   ret <vscale x 2 x i64> %out
 }
 
@@ -1414,10 +1258,8 @@ define <vscale x 16 x i8> @uqsub_b_lowimm(<vscale x 16 x i8> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uqsub z0.b, z0.b, #27 // =0x1b
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 16 x i8> undef, i8 27, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
   %out = call <vscale x 16 x i8> @llvm.aarch64.sve.uqsub.x.nxv16i8(<vscale x 16 x i8> %a,
-                                                                   <vscale x 16 x i8> %splat)
+                                                                   <vscale x 16 x i8> splat(i8 27))
   ret <vscale x 16 x i8> %out
 }
 
@@ -1426,10 +1268,8 @@ define <vscale x 8 x i16> @uqsub_h_lowimm(<vscale x 8 x i16> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uqsub z0.h, z0.h, #43 // =0x2b
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 43, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.x.nxv8i16(<vscale x 8 x i16> %a,
-                                                                   <vscale x 8 x i16> %splat)
+                                                                   <vscale x 8 x i16> splat(i16 43))
   ret <vscale x 8 x i16> %out
 }
 
@@ -1438,10 +1278,8 @@ define <vscale x 8 x i16> @uqsub_h_highimm(<vscale x 8 x i16> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uqsub z0.h, z0.h, #2048 // =0x800
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 2048, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.x.nxv8i16(<vscale x 8 x i16> %a,
-                                                                   <vscale x 8 x i16> %splat)
+                                                                   <vscale x 8 x i16> splat(i16 2048))
   ret <vscale x 8 x i16> %out
 }
 
@@ -1450,10 +1288,8 @@ define <vscale x 4 x i32> @uqsub_s_lowimm(<vscale x 4 x i32> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uqsub z0.s, z0.s, #1 // =0x1
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 1, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.x.nxv4i32(<vscale x 4 x i32> %a,
-                                                                   <vscale x 4 x i32> %splat)
+                                                                   <vscale x 4 x i32> splat(i32 1))
   ret <vscale x 4 x i32> %out
 }
 
@@ -1462,10 +1298,8 @@ define <vscale x 4 x i32> @uqsub_s_highimm(<vscale x 4 x i32> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uqsub z0.s, z0.s, #8192 // =0x2000
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 8192, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.x.nxv4i32(<vscale x 4 x i32> %a,
-                                                                   <vscale x 4 x i32> %splat)
+                                                                   <vscale x 4 x i32> splat(i32 8192))
   ret <vscale x 4 x i32> %out
 }
 
@@ -1474,10 +1308,8 @@ define <vscale x 2 x i64> @uqsub_d_lowimm(<vscale x 2 x i64> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uqsub z0.d, z0.d, #255 // =0xff
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 255, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.x.nxv2i64(<vscale x 2 x i64> %a,
-                                                                   <vscale x 2 x i64> %splat)
+                                                                   <vscale x 2 x i64> splat(i64 255))
   ret <vscale x 2 x i64> %out
 }
 
@@ -1486,10 +1318,8 @@ define <vscale x 2 x i64> @uqsub_d_highimm(<vscale x 2 x i64> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uqsub z0.d, z0.d, #65280 // =0xff00
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 65280, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.x.nxv2i64(<vscale x 2 x i64> %a,
-                                                                   <vscale x 2 x i64> %splat)
+                                                                   <vscale x 2 x i64> splat(i64 65280))
   ret <vscale x 2 x i64> %out
 }
 
@@ -1500,11 +1330,9 @@ define <vscale x 16 x i8> @asr_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a)
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    asr z0.b, p0/m, z0.b, #8
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 16 x i8> undef, i8 9, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
   %out = call <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1> %pg,
                                                                <vscale x 16 x i8> %a,
-                                                               <vscale x 16 x i8> %splat)
+                                                               <vscale x 16 x i8> splat(i8 9))
   ret <vscale x 16 x i8> %out
 }
 
@@ -1514,11 +1342,9 @@ define <vscale x 16 x i8> @asr_i8_all_active(<vscale x 16 x i8> %a) {
 ; CHECK-NEXT:    asr z0.b, z0.b, #8
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
-  %elt = insertelement <vscale x 16 x i8> undef, i8 8, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
   %out = call <vscale x 16 x i8> @llvm.aarch64.sve.asr.u.nxv16i8(<vscale x 16 x i1> %pg,
                                                                  <vscale x 16 x i8> %a,
-                                                                 <vscale x 16 x i8> %splat)
+                                                                 <vscale x 16 x i8> splat(i8 8))
   ret <vscale x 16 x i8> %out
 }
 
@@ -1540,11 +1366,9 @@ define <vscale x 8 x i16> @asr_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a)
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    asr z0.h, p0/m, z0.h, #16
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 17, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1> %pg,
                                                                <vscale x 8 x i16> %a,
-                                                               <vscale x 8 x i16> %splat)
+                                                               <vscale x 8 x i16> splat(i16 17))
   ret <vscale x 8 x i16> %out
 }
 
@@ -1554,11 +1378,9 @@ define <vscale x 8 x i16> @asr_i16_all_active(<vscale x 8 x i16> %a) {
 ; CHECK-NEXT:    asr z0.h, z0.h, #16
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
-  %elt = insertelement <vscale x 8 x i16> undef, i16 16, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i16> @llvm.aarch64.sve.asr.u.nxv8i16(<vscale x 8 x i1> %pg,
                                                                  <vscale x 8 x i16> %a,
-                                                                 <vscale x 8 x i16> %splat)
+                                                                 <vscale x 8 x i16> splat(i16 16))
   ret <vscale x 8 x i16> %out
 }
 
@@ -1580,11 +1402,9 @@ define <vscale x 4 x i32> @asr_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a)
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    asr z0.s, p0/m, z0.s, #32
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 33, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i32> @llvm.aarch64.sve.asr.nxv4i32(<vscale x 4 x i1> %pg,
                                                                <vscale x 4 x i32> %a,
-                                                               <vscale x 4 x i32> %splat)
+                                                               <vscale x 4 x i32> splat(i32 33))
   ret <vscale x 4 x i32> %out
 }
 
@@ -1594,11 +1414,9 @@ define <vscale x 4 x i32> @asr_i32_all_active(<vscale x 4 x i32> %a) {
 ; CHECK-NEXT:    asr z0.s, z0.s, #32
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
-  %elt = insertelement <vscale x 4 x i32> undef, i32 32, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i32> @llvm.aarch64.sve.asr.u.nxv4i32(<vscale x 4 x i1> %pg,
                                                                  <vscale x 4 x i32> %a,
-                                                                 <vscale x 4 x i32> %splat)
+                                                                 <vscale x 4 x i32> splat(i32 32))
   ret <vscale x 4 x i32> %out
 }
 
@@ -1620,11 +1438,9 @@ define <vscale x 2 x i64> @asr_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a)
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    asr z0.d, p0/m, z0.d, #64
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 65, i64 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i64> @llvm.aarch64.sve.asr.nxv2i64(<vscale x 2 x i1> %pg,
                                                                <vscale x 2 x i64> %a,
-                                                               <vscale x 2 x i64> %splat)
+                                                               <vscale x 2 x i64> splat(i64 65))
   ret <vscale x 2 x i64> %out
 }
 
@@ -1634,11 +1450,9 @@ define <vscale x 2 x i64> @asr_i64_all_active(<vscale x 2 x i64> %a) {
 ; CHECK-NEXT:    asr z0.d, z0.d, #64
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
-  %elt = insertelement <vscale x 2 x i64> undef, i64 64, i64 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i64> @llvm.aarch64.sve.asr.u.nxv2i64(<vscale x 2 x i1> %pg,
                                                                  <vscale x 2 x i64> %a,
-                                                                 <vscale x 2 x i64> %splat)
+                                                                 <vscale x 2 x i64> splat(i64 64))
   ret <vscale x 2 x i64> %out
 }
 
@@ -1662,11 +1476,9 @@ define <vscale x 16 x i8> @lsl_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a)
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsl z0.b, p0/m, z0.b, #7
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 16 x i8> undef, i8 7, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
   %out = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.nxv16i8(<vscale x 16 x i1> %pg,
                                                                <vscale x 16 x i8> %a,
-                                                               <vscale x 16 x i8> %splat)
+                                                               <vscale x 16 x i8> splat(i8 7))
   ret <vscale x 16 x i8> %out
 }
 
@@ -1676,11 +1488,9 @@ define <vscale x 16 x i8> @lsl_i8_all_active(<vscale x 16 x i8> %a) {
 ; CHECK-NEXT:    lsl z0.b, z0.b, #7
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
-  %elt = insertelement <vscale x 16 x i8> undef, i8 7, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
   %out = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.u.nxv16i8(<vscale x 16 x i1> %pg,
                                                                  <vscale x 16 x i8> %a,
-                                                                 <vscale x 16 x i8> %splat)
+                                                                 <vscale x 16 x i8> splat(i8 7))
   ret <vscale x 16 x i8> %out
 }
 
@@ -1691,11 +1501,9 @@ define <vscale x 16 x i8> @lsl_i8_too_big(<vscale x 16 x i1> %pg, <vscale x 16 x
 ; CHECK-NEXT:    mov z1.b, #8 // =0x8
 ; CHECK-NEXT:    lsl z0.b, p0/m, z0.b, z1.b
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 16 x i8> undef, i8 8, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
   %out = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.nxv16i8(<vscale x 16 x i1> %pg,
                                                                <vscale x 16 x i8> %a,
-                                                               <vscale x 16 x i8> %splat)
+                                                               <vscale x 16 x i8> splat(i8 8))
   ret <vscale x 16 x i8> %out
 }
 
@@ -1715,11 +1523,9 @@ define <vscale x 8 x i16> @lsl_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a)
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsl z0.h, p0/m, z0.h, #15
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 15, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.nxv8i16(<vscale x 8 x i1> %pg,
                                                                <vscale x 8 x i16> %a,
-                                                               <vscale x 8 x i16> %splat)
+                                                               <vscale x 8 x i16> splat(i16 15))
   ret <vscale x 8 x i16> %out
 }
 
@@ -1729,11 +1535,9 @@ define <vscale x 8 x i16> @lsl_i16_all_active(<vscale x 8 x i16> %a) {
 ; CHECK-NEXT:    lsl z0.h, z0.h, #15
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
-  %elt = insertelement <vscale x 8 x i16> undef, i16 15, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.u.nxv8i16(<vscale x 8 x i1> %pg,
                                                                  <vscale x 8 x i16> %a,
-                                                                 <vscale x 8 x i16> %splat)
+                                                                 <vscale x 8 x i16> splat(i16 15))
   ret <vscale x 8 x i16> %out
 }
 
@@ -1744,11 +1548,9 @@ define <vscale x 8 x i16> @lsl_i16_too_big(<vscale x 8 x i1> %pg, <vscale x 8 x
 ; CHECK-NEXT:    mov z1.h, #16 // =0x10
 ; CHECK-NEXT:    lsl z0.h, p0/m, z0.h, z1.h
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 16, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.nxv8i16(<vscale x 8 x i1> %pg,
                                                                <vscale x 8 x i16> %a,
-                                                               <vscale x 8 x i16> %splat)
+                                                               <vscale x 8 x i16> splat(i16 16))
   ret <vscale x 8 x i16> %out
 }
 
@@ -1768,11 +1570,9 @@ define <vscale x 4 x i32> @lsl_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a)
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsl z0.s, p0/m, z0.s, #31
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 31, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.nxv4i32(<vscale x 4 x i1> %pg,
                                                                <vscale x 4 x i32> %a,
-                                                               <vscale x 4 x i32> %splat)
+                                                               <vscale x 4 x i32> splat(i32 31))
   ret <vscale x 4 x i32> %out
 }
 
@@ -1782,11 +1582,9 @@ define <vscale x 4 x i32> @lsl_i32_all_active(<vscale x 4 x i32> %a) {
 ; CHECK-NEXT:    lsl z0.s, z0.s, #31
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
-  %elt = insertelement <vscale x 4 x i32> undef, i32 31, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.u.nxv4i32(<vscale x 4 x i1> %pg,
                                                                  <vscale x 4 x i32> %a,
-                                                                 <vscale x 4 x i32> %splat)
+                                                                 <vscale x 4 x i32> splat(i32 31))
   ret <vscale x 4 x i32> %out
 }
 
@@ -1797,11 +1595,9 @@ define <vscale x 4 x i32> @lsl_i32_too_big(<vscale x 4 x i1> %pg, <vscale x 4 x
 ; CHECK-NEXT:    mov z1.s, #32 // =0x20
 ; CHECK-NEXT:    lsl z0.s, p0/m, z0.s, z1.s
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 32, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.nxv4i32(<vscale x 4 x i1> %pg,
                                                                <vscale x 4 x i32> %a,
-                                                               <vscale x 4 x i32> %splat)
+                                                               <vscale x 4 x i32> splat(i32 32))
   ret <vscale x 4 x i32> %out
 }
 
@@ -1821,11 +1617,9 @@ define <vscale x 2 x i64> @lsl_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a)
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsl z0.d, p0/m, z0.d, #63
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 63, i64 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i64> @llvm.aarch64.sve.lsl.nxv2i64(<vscale x 2 x i1> %pg,
                                                                <vscale x 2 x i64> %a,
-                                                               <vscale x 2 x i64> %splat)
+                                                               <vscale x 2 x i64> splat(i64 63))
   ret <vscale x 2 x i64> %out
 }
 
@@ -1835,11 +1629,9 @@ define <vscale x 2 x i64> @lsl_i64_all_active(<vscale x 2 x i64> %a) {
 ; CHECK-NEXT:    lsl z0.d, z0.d, #63
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
-  %elt = insertelement <vscale x 2 x i64> undef, i64 63, i64 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i64> @llvm.aarch64.sve.lsl.u.nxv2i64(<vscale x 2 x i1> %pg,
                                                                  <vscale x 2 x i64> %a,
-                                                                 <vscale x 2 x i64> %splat)
+                                                                 <vscale x 2 x i64> splat(i64 63))
   ret <vscale x 2 x i64> %out
 }
 
@@ -1850,11 +1642,9 @@ define <vscale x 2 x i64> @lsl_i64_too_big(<vscale x 2 x i1> %pg, <vscale x 2 x
 ; CHECK-NEXT:    mov z1.d, #64 // =0x40
 ; CHECK-NEXT:    lsl z0.d, p0/m, z0.d, z1.d
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 64, i64 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i64> @llvm.aarch64.sve.lsl.nxv2i64(<vscale x 2 x i1> %pg,
                                                                <vscale x 2 x i64> %a,
-                                                               <vscale x 2 x i64> %splat)
+                                                               <vscale x 2 x i64> splat(i64 64))
   ret <vscale x 2 x i64> %out
 }
 
@@ -1876,11 +1666,9 @@ define <vscale x 16 x i8> @lsr_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a)
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsr z0.b, p0/m, z0.b, #8
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 16 x i8> undef, i8 9, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
   %out = call <vscale x 16 x i8> @llvm.aarch64.sve.lsr.nxv16i8(<vscale x 16 x i1> %pg,
                                                                <vscale x 16 x i8> %a,
-                                                               <vscale x 16 x i8> %splat)
+                                                               <vscale x 16 x i8> splat(i8 9))
   ret <vscale x 16 x i8> %out
 }
 
@@ -1890,11 +1678,9 @@ define <vscale x 16 x i8> @lsr_i8_all_active(<vscale x 16 x i8> %a) {
 ; CHECK-NEXT:    lsr z0.b, z0.b, #8
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
-  %elt = insertelement <vscale x 16 x i8> undef, i8 8, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
   %out = call <vscale x 16 x i8> @llvm.aarch64.sve.lsr.u.nxv16i8(<vscale x 16 x i1> %pg,
                                                                  <vscale x 16 x i8> %a,
-                                                                 <vscale x 16 x i8> %splat)
+                                                                 <vscale x 16 x i8> splat(i8 8))
   ret <vscale x 16 x i8> %out
 }
 
@@ -1916,11 +1702,9 @@ define <vscale x 8 x i16> @lsr_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a)
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsr z0.h, p0/m, z0.h, #16
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 17, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i16> @llvm.aarch64.sve.lsr.nxv8i16(<vscale x 8 x i1> %pg,
                                                                <vscale x 8 x i16> %a,
-                                                               <vscale x 8 x i16> %splat)
+                                                               <vscale x 8 x i16> splat(i16 17))
   ret <vscale x 8 x i16> %out
 }
 
@@ -1930,11 +1714,9 @@ define <vscale x 8 x i16> @lsr_i16_all_active(<vscale x 8 x i16> %a) {
 ; CHECK-NEXT:    lsr z0.h, z0.h, #16
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
-  %elt = insertelement <vscale x 8 x i16> undef, i16 16, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i16> @llvm.aarch64.sve.lsr.u.nxv8i16(<vscale x 8 x i1> %pg,
                                                                  <vscale x 8 x i16> %a,
-                                                                 <vscale x 8 x i16> %splat)
+                                                                 <vscale x 8 x i16> splat(i16 16))
   ret <vscale x 8 x i16> %out
 }
 
@@ -1956,11 +1738,9 @@ define <vscale x 4 x i32> @lsr_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a)
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsr z0.s, p0/m, z0.s, #32
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 33, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i32> @llvm.aarch64.sve.lsr.nxv4i32(<vscale x 4 x i1> %pg,
                                                                <vscale x 4 x i32> %a,
-                                                               <vscale x 4 x i32> %splat)
+                                                               <vscale x 4 x i32> splat(i32 33))
   ret <vscale x 4 x i32> %out
 }
 
@@ -1970,11 +1750,9 @@ define <vscale x 4 x i32> @lsr_i32_all_active(<vscale x 4 x i32> %a) {
 ; CHECK-NEXT:    lsr z0.s, z0.s, #32
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
-  %elt = insertelement <vscale x 4 x i32> undef, i32 32, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i32> @llvm.aarch64.sve.lsr.u.nxv4i32(<vscale x 4 x i1> %pg,
                                                                  <vscale x 4 x i32> %a,
-                                                                 <vscale x 4 x i32> %splat)
+                                                                 <vscale x 4 x i32> splat(i32 32))
   ret <vscale x 4 x i32> %out
 }
 
@@ -1996,11 +1774,9 @@ define <vscale x 2 x i64> @lsr_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a)
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsr z0.d, p0/m, z0.d, #64
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 65, i64 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i64> @llvm.aarch64.sve.lsr.nxv2i64(<vscale x 2 x i1> %pg,
                                                                <vscale x 2 x i64> %a,
-                                                               <vscale x 2 x i64> %splat)
+                                                               <vscale x 2 x i64> splat(i64 65))
   ret <vscale x 2 x i64> %out
 }
 
@@ -2010,11 +1786,9 @@ define <vscale x 2 x i64> @lsr_i64_all_active(<vscale x 2 x i64> %a) {
 ; CHECK-NEXT:    lsr z0.d, z0.d, #64
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
-  %elt = insertelement <vscale x 2 x i64> undef, i64 64, i64 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i64> @llvm.aarch64.sve.lsr.u.nxv2i64(<vscale x 2 x i1> %pg,
                                                                  <vscale x 2 x i64> %a,
-                                                                 <vscale x 2 x i64> %splat)
+                                                                 <vscale x 2 x i64> splat(i64 64))
   ret <vscale x 2 x i64> %out
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith-undef.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith-undef.ll
index 929f8f5f54e01d..3c001a87083d40 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith-undef.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith-undef.ll
@@ -61,11 +61,9 @@ define <vscale x 16 x i8> @add_imm_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    add z0.b, z0.b, #3 // =0x3
 ; CHECK-NEXT:    ret
-  %imm = insertelement <vscale x 16 x i8> undef, i8 3, i32 0
-  %imm.splat = shufflevector <vscale x 16 x i8> %imm, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
   %out = call <vscale x 16 x i8> @llvm.aarch64.sve.add.u.nxv16i8(<vscale x 16 x i1> %pg,
                                                                  <vscale x 16 x i8> %a,
-                                                                 <vscale x 16 x i8> %imm.splat)
+                                                                 <vscale x 16 x i8> splat(i8 3))
   ret <vscale x 16 x i8> %out
 }
 
@@ -74,11 +72,9 @@ define <vscale x 8 x i16> @add_imm_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    add z0.h, z0.h, #4 // =0x4
 ; CHECK-NEXT:    ret
-  %imm = insertelement <vscale x 8 x i16> undef, i16 4, i32 0
-  %imm.splat = shufflevector <vscale x 8 x i16> %imm, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i16> @llvm.aarch64.sve.add.u.nxv8i16(<vscale x 8 x i1> %pg,
                                                                  <vscale x 8 x i16> %a,
-                                                                 <vscale x 8 x i16> %imm.splat)
+                                                                 <vscale x 8 x i16> splat(i16 4))
   ret <vscale x 8 x i16> %out
 }
 
@@ -87,11 +83,9 @@ define <vscale x 4 x i32> @add_imm_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    add z0.s, z0.s, #5 // =0x5
 ; CHECK-NEXT:    ret
-  %imm = insertelement <vscale x 4 x i32> undef, i32 5, i32 0
-  %imm.splat = shufflevector <vscale x 4 x i32> %imm, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i32> @llvm.aarch64.sve.add.u.nxv4i32(<vscale x 4 x i1> %pg,
                                                                  <vscale x 4 x i32> %a,
-                                                                 <vscale x 4 x i32> %imm.splat)
+                                                                 <vscale x 4 x i32> splat(i32 5))
   ret <vscale x 4 x i32> %out
 }
 
@@ -100,11 +94,9 @@ define <vscale x 2 x i64> @add_imm_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    add z0.d, z0.d, #6 // =0x6
 ; CHECK-NEXT:    ret
-  %imm = insertelement <vscale x 2 x i64> undef, i64 6, i32 0
-  %imm.splat = shufflevector <vscale x 2 x i64> %imm, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i64> @llvm.aarch64.sve.add.u.nxv2i64(<vscale x 2 x i1> %pg,
                                                                  <vscale x 2 x i64> %a,
-                                                                 <vscale x 2 x i64> %imm.splat)
+                                                                 <vscale x 2 x i64> splat(i64 6))
   ret <vscale x 2 x i64> %out
 }
 
@@ -289,11 +281,9 @@ define <vscale x 16 x i8> @mul_imm_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mul z0.b, z0.b, #3
 ; CHECK-NEXT:    ret
-  %imm = insertelement <vscale x 16 x i8> undef, i8 3, i32 0
-  %imm.splat = shufflevector <vscale x 16 x i8> %imm, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
   %out = call <vscale x 16 x i8> @llvm.aarch64.sve.mul.u.nxv16i8(<vscale x 16 x i1> %pg,
                                                                  <vscale x 16 x i8> %a,
-                                                                 <vscale x 16 x i8> %imm.splat)
+                                                                 <vscale x 16 x i8> splat(i8 3))
   ret <vscale x 16 x i8> %out
 }
 
@@ -302,11 +292,9 @@ define <vscale x 8 x i16> @mul_imm_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mul z0.h, z0.h, #4
 ; CHECK-NEXT:    ret
-  %imm = insertelement <vscale x 8 x i16> undef, i16 4, i32 0
-  %imm.splat = shufflevector <vscale x 8 x i16> %imm, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i16> @llvm.aarch64.sve.mul.u.nxv8i16(<vscale x 8 x i1> %pg,
                                                                  <vscale x 8 x i16> %a,
-                                                                 <vscale x 8 x i16> %imm.splat)
+                                                                 <vscale x 8 x i16> splat(i16 4))
   ret <vscale x 8 x i16> %out
 }
 
@@ -315,11 +303,9 @@ define <vscale x 4 x i32> @mul_imm_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mul z0.s, z0.s, #5
 ; CHECK-NEXT:    ret
-  %imm = insertelement <vscale x 4 x i32> undef, i32 5, i32 0
-  %imm.splat = shufflevector <vscale x 4 x i32> %imm, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i32> @llvm.aarch64.sve.mul.u.nxv4i32(<vscale x 4 x i1> %pg,
                                                                  <vscale x 4 x i32> %a,
-                                                                 <vscale x 4 x i32> %imm.splat)
+                                                                 <vscale x 4 x i32> splat(i32 5))
   ret <vscale x 4 x i32> %out
 }
 
@@ -328,11 +314,9 @@ define <vscale x 2 x i64> @mul_imm_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mul z0.d, z0.d, #6
 ; CHECK-NEXT:    ret
-  %imm = insertelement <vscale x 2 x i64> undef, i64 6, i32 0
-  %imm.splat = shufflevector <vscale x 2 x i64> %imm, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i64> @llvm.aarch64.sve.mul.u.nxv2i64(<vscale x 2 x i1> %pg,
                                                                  <vscale x 2 x i64> %a,
-                                                                 <vscale x 2 x i64> %imm.splat)
+                                                                 <vscale x 2 x i64> splat(i64 6))
   ret <vscale x 2 x i64> %out
 }
 
@@ -497,11 +481,9 @@ define <vscale x 16 x i8> @smax_imm_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    smax z0.b, z0.b, #3
 ; CHECK-NEXT:    ret
-  %imm = insertelement <vscale x 16 x i8> undef, i8 3, i32 0
-  %imm.splat = shufflevector <vscale x 16 x i8> %imm, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
   %out = call <vscale x 16 x i8> @llvm.aarch64.sve.smax.u.nxv16i8(<vscale x 16 x i1> %pg,
                                                                   <vscale x 16 x i8> %a,
-                                                                  <vscale x 16 x i8> %imm.splat)
+                                                                  <vscale x 16 x i8> splat(i8 3))
   ret <vscale x 16 x i8> %out
 }
 
@@ -510,11 +492,9 @@ define <vscale x 8 x i16> @smax_imm_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    smax z0.h, z0.h, #4
 ; CHECK-NEXT:    ret
-  %imm = insertelement <vscale x 8 x i16> undef, i16 4, i32 0
-  %imm.splat = shufflevector <vscale x 8 x i16> %imm, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i16> @llvm.aarch64.sve.smax.u.nxv8i16(<vscale x 8 x i1> %pg,
                                                                   <vscale x 8 x i16> %a,
-                                                                  <vscale x 8 x i16> %imm.splat)
+                                                                  <vscale x 8 x i16> splat(i16 4))
   ret <vscale x 8 x i16> %out
 }
 
@@ -523,11 +503,9 @@ define <vscale x 4 x i32> @smax_imm_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    smax z0.s, z0.s, #5
 ; CHECK-NEXT:    ret
-  %imm = insertelement <vscale x 4 x i32> undef, i32 5, i32 0
-  %imm.splat = shufflevector <vscale x 4 x i32> %imm, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i32> @llvm.aarch64.sve.smax.u.nxv4i32(<vscale x 4 x i1> %pg,
                                                                   <vscale x 4 x i32> %a,
-                                                                  <vscale x 4 x i32> %imm.splat)
+                                                                  <vscale x 4 x i32> splat(i32 5))
   ret <vscale x 4 x i32> %out
 }
 
@@ -536,11 +514,9 @@ define <vscale x 2 x i64> @smax_imm_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    smax z0.d, z0.d, #6
 ; CHECK-NEXT:    ret
-  %imm = insertelement <vscale x 2 x i64> undef, i64 6, i32 0
-  %imm.splat = shufflevector <vscale x 2 x i64> %imm, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i64> @llvm.aarch64.sve.smax.u.nxv2i64(<vscale x 2 x i1> %pg,
                                                                   <vscale x 2 x i64> %a,
-                                                                  <vscale x 2 x i64> %imm.splat)
+                                                                  <vscale x 2 x i64> splat(i64 6))
   ret <vscale x 2 x i64> %out
 }
 
@@ -601,11 +577,9 @@ define <vscale x 16 x i8> @smin_imm_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    smin z0.b, z0.b, #3
 ; CHECK-NEXT:    ret
-  %imm = insertelement <vscale x 16 x i8> undef, i8 3, i32 0
-  %imm.splat = shufflevector <vscale x 16 x i8> %imm, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
   %out = call <vscale x 16 x i8> @llvm.aarch64.sve.smin.u.nxv16i8(<vscale x 16 x i1> %pg,
                                                                   <vscale x 16 x i8> %a,
-                                                                  <vscale x 16 x i8> %imm.splat)
+                                                                  <vscale x 16 x i8> splat(i8 3))
   ret <vscale x 16 x i8> %out
 }
 
@@ -614,11 +588,9 @@ define <vscale x 8 x i16> @smin_imm_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    smin z0.h, z0.h, #4
 ; CHECK-NEXT:    ret
-  %imm = insertelement <vscale x 8 x i16> undef, i16 4, i32 0
-  %imm.splat = shufflevector <vscale x 8 x i16> %imm, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i16> @llvm.aarch64.sve.smin.u.nxv8i16(<vscale x 8 x i1> %pg,
                                                                   <vscale x 8 x i16> %a,
-                                                                  <vscale x 8 x i16> %imm.splat)
+                                                                  <vscale x 8 x i16> splat(i16 4))
   ret <vscale x 8 x i16> %out
 }
 
@@ -627,11 +599,9 @@ define <vscale x 4 x i32> @smin_imm_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    smin z0.s, z0.s, #5
 ; CHECK-NEXT:    ret
-  %imm = insertelement <vscale x 4 x i32> undef, i32 5, i32 0
-  %imm.splat = shufflevector <vscale x 4 x i32> %imm, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i32> @llvm.aarch64.sve.smin.u.nxv4i32(<vscale x 4 x i1> %pg,
                                                                   <vscale x 4 x i32> %a,
-                                                                  <vscale x 4 x i32> %imm.splat)
+                                                                  <vscale x 4 x i32> splat(i32 5))
   ret <vscale x 4 x i32> %out
 }
 
@@ -640,11 +610,9 @@ define <vscale x 2 x i64> @smin_imm_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    smin z0.d, z0.d, #6
 ; CHECK-NEXT:    ret
-  %imm = insertelement <vscale x 2 x i64> undef, i64 6, i32 0
-  %imm.splat = shufflevector <vscale x 2 x i64> %imm, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i64> @llvm.aarch64.sve.smin.u.nxv2i64(<vscale x 2 x i1> %pg,
                                                                   <vscale x 2 x i64> %a,
-                                                                  <vscale x 2 x i64> %imm.splat)
+                                                                  <vscale x 2 x i64> splat(i64 6))
   ret <vscale x 2 x i64> %out
 }
 
@@ -773,11 +741,9 @@ define <vscale x 16 x i8> @sub_imm_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub z0.b, z0.b, #3 // =0x3
 ; CHECK-NEXT:    ret
-  %imm = insertelement <vscale x 16 x i8> undef, i8 3, i32 0
-  %imm.splat = shufflevector <vscale x 16 x i8> %imm, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
   %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sub.u.nxv16i8(<vscale x 16 x i1> %pg,
                                                                  <vscale x 16 x i8> %a,
-                                                                 <vscale x 16 x i8> %imm.splat)
+                                                                 <vscale x 16 x i8> splat(i8 3))
   ret <vscale x 16 x i8> %out
 }
 
@@ -786,11 +752,9 @@ define <vscale x 8 x i16> @sub_imm_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub z0.h, z0.h, #4 // =0x4
 ; CHECK-NEXT:    ret
-  %imm = insertelement <vscale x 8 x i16> undef, i16 4, i32 0
-  %imm.splat = shufflevector <vscale x 8 x i16> %imm, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sub.u.nxv8i16(<vscale x 8 x i1> %pg,
                                                                  <vscale x 8 x i16> %a,
-                                                                 <vscale x 8 x i16> %imm.splat)
+                                                                 <vscale x 8 x i16> splat(i16 4))
   ret <vscale x 8 x i16> %out
 }
 
@@ -799,11 +763,9 @@ define <vscale x 4 x i32> @sub_imm_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub z0.s, z0.s, #5 // =0x5
 ; CHECK-NEXT:    ret
-  %imm = insertelement <vscale x 4 x i32> undef, i32 5, i32 0
-  %imm.splat = shufflevector <vscale x 4 x i32> %imm, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sub.u.nxv4i32(<vscale x 4 x i1> %pg,
                                                                  <vscale x 4 x i32> %a,
-                                                                 <vscale x 4 x i32> %imm.splat)
+                                                                 <vscale x 4 x i32> splat(i32 5))
   ret <vscale x 4 x i32> %out
 }
 
@@ -812,11 +774,9 @@ define <vscale x 2 x i64> @sub_imm_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub z0.d, z0.d, #6 // =0x6
 ; CHECK-NEXT:    ret
-  %imm = insertelement <vscale x 2 x i64> undef, i64 6, i32 0
-  %imm.splat = shufflevector <vscale x 2 x i64> %imm, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sub.u.nxv2i64(<vscale x 2 x i1> %pg,
                                                                  <vscale x 2 x i64> %a,
-                                                                 <vscale x 2 x i64> %imm.splat)
+                                                                 <vscale x 2 x i64> splat(i64 6))
   ret <vscale x 2 x i64> %out
 }
 
@@ -877,10 +837,8 @@ define <vscale x 16 x i8> @subr_imm_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    subr z0.b, z0.b, #3 // =0x3
 ; CHECK-NEXT:    ret
-  %imm = insertelement <vscale x 16 x i8> undef, i8 3, i32 0
-  %imm.splat = shufflevector <vscale x 16 x i8> %imm, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
   %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sub.u.nxv16i8(<vscale x 16 x i1> %pg,
-                                                                 <vscale x 16 x i8> %imm.splat,
+                                                                 <vscale x 16 x i8> splat(i8 3),
                                                                  <vscale x 16 x i8> %a)
   ret <vscale x 16 x i8> %out
 }
@@ -890,10 +848,8 @@ define <vscale x 8 x i16> @subr_imm_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    subr z0.h, z0.h, #4 // =0x4
 ; CHECK-NEXT:    ret
-  %imm = insertelement <vscale x 8 x i16> undef, i16 4, i32 0
-  %imm.splat = shufflevector <vscale x 8 x i16> %imm, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sub.u.nxv8i16(<vscale x 8 x i1> %pg,
-                                                                 <vscale x 8 x i16> %imm.splat,
+                                                                 <vscale x 8 x i16> splat(i16 4),
                                                                  <vscale x 8 x i16> %a)
   ret <vscale x 8 x i16> %out
 }
@@ -903,10 +859,8 @@ define <vscale x 4 x i32> @subr_imm_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    subr z0.s, z0.s, #5 // =0x5
 ; CHECK-NEXT:    ret
-  %imm = insertelement <vscale x 4 x i32> undef, i32 5, i32 0
-  %imm.splat = shufflevector <vscale x 4 x i32> %imm, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sub.u.nxv4i32(<vscale x 4 x i1> %pg,
-                                                                 <vscale x 4 x i32> %imm.splat,
+                                                                 <vscale x 4 x i32> splat(i32 5),
                                                                  <vscale x 4 x i32> %a)
   ret <vscale x 4 x i32> %out
 }
@@ -916,10 +870,8 @@ define <vscale x 2 x i64> @subr_imm_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    subr z0.d, z0.d, #6 // =0x6
 ; CHECK-NEXT:    ret
-  %imm = insertelement <vscale x 2 x i64> undef, i64 6, i32 0
-  %imm.splat = shufflevector <vscale x 2 x i64> %imm, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sub.u.nxv2i64(<vscale x 2 x i1> %pg,
-                                                                 <vscale x 2 x i64> %imm.splat,
+                                                                 <vscale x 2 x i64> splat(i64 6),
                                                                  <vscale x 2 x i64> %a)
   ret <vscale x 2 x i64> %out
 }
@@ -1085,11 +1037,9 @@ define <vscale x 16 x i8> @umax_imm_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    umax z0.b, z0.b, #3
 ; CHECK-NEXT:    ret
-  %imm = insertelement <vscale x 16 x i8> undef, i8 3, i32 0
-  %imm.splat = shufflevector <vscale x 16 x i8> %imm, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
   %out = call <vscale x 16 x i8> @llvm.aarch64.sve.umax.u.nxv16i8(<vscale x 16 x i1> %pg,
                                                                   <vscale x 16 x i8> %a,
-                                                                  <vscale x 16 x i8> %imm.splat)
+                                                                  <vscale x 16 x i8> splat(i8 3))
   ret <vscale x 16 x i8> %out
 }
 
@@ -1098,11 +1048,9 @@ define <vscale x 8 x i16> @umax_imm_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    umax z0.h, z0.h, #4
 ; CHECK-NEXT:    ret
-  %imm = insertelement <vscale x 8 x i16> undef, i16 4, i32 0
-  %imm.splat = shufflevector <vscale x 8 x i16> %imm, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i16> @llvm.aarch64.sve.umax.u.nxv8i16(<vscale x 8 x i1> %pg,
                                                                   <vscale x 8 x i16> %a,
-                                                                  <vscale x 8 x i16> %imm.splat)
+                                                                  <vscale x 8 x i16> splat(i16 4))
   ret <vscale x 8 x i16> %out
 }
 
@@ -1111,11 +1059,9 @@ define <vscale x 4 x i32> @umax_imm_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    umax z0.s, z0.s, #5
 ; CHECK-NEXT:    ret
-  %imm = insertelement <vscale x 4 x i32> undef, i32 5, i32 0
-  %imm.splat = shufflevector <vscale x 4 x i32> %imm, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i32> @llvm.aarch64.sve.umax.u.nxv4i32(<vscale x 4 x i1> %pg,
                                                                   <vscale x 4 x i32> %a,
-                                                                  <vscale x 4 x i32> %imm.splat)
+                                                                  <vscale x 4 x i32> splat(i32 5))
   ret <vscale x 4 x i32> %out
 }
 
@@ -1124,11 +1070,9 @@ define <vscale x 2 x i64> @umax_imm_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    umax z0.d, z0.d, #6
 ; CHECK-NEXT:    ret
-  %imm = insertelement <vscale x 2 x i64> undef, i64 6, i32 0
-  %imm.splat = shufflevector <vscale x 2 x i64> %imm, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i64> @llvm.aarch64.sve.umax.u.nxv2i64(<vscale x 2 x i1> %pg,
                                                                   <vscale x 2 x i64> %a,
-                                                                  <vscale x 2 x i64> %imm.splat)
+                                                                  <vscale x 2 x i64> splat(i64 6))
   ret <vscale x 2 x i64> %out
 }
 
@@ -1189,11 +1133,9 @@ define <vscale x 16 x i8> @umin_imm_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    umin z0.b, z0.b, #3
 ; CHECK-NEXT:    ret
-  %imm = insertelement <vscale x 16 x i8> undef, i8 3, i32 0
-  %imm.splat = shufflevector <vscale x 16 x i8> %imm, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
   %out = call <vscale x 16 x i8> @llvm.aarch64.sve.umin.u.nxv16i8(<vscale x 16 x i1> %pg,
                                                                   <vscale x 16 x i8> %a,
-                                                                  <vscale x 16 x i8> %imm.splat)
+                                                                  <vscale x 16 x i8> splat(i8 3))
   ret <vscale x 16 x i8> %out
 }
 
@@ -1202,11 +1144,9 @@ define <vscale x 8 x i16> @umin_imm_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    umin z0.h, z0.h, #4
 ; CHECK-NEXT:    ret
-  %imm = insertelement <vscale x 8 x i16> undef, i16 4, i32 0
-  %imm.splat = shufflevector <vscale x 8 x i16> %imm, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i16> @llvm.aarch64.sve.umin.u.nxv8i16(<vscale x 8 x i1> %pg,
                                                                   <vscale x 8 x i16> %a,
-                                                                  <vscale x 8 x i16> %imm.splat)
+                                                                  <vscale x 8 x i16> splat(i16 4))
   ret <vscale x 8 x i16> %out
 }
 
@@ -1215,11 +1155,9 @@ define <vscale x 4 x i32> @umin_imm_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    umin z0.s, z0.s, #5
 ; CHECK-NEXT:    ret
-  %imm = insertelement <vscale x 4 x i32> undef, i32 5, i32 0
-  %imm.splat = shufflevector <vscale x 4 x i32> %imm, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i32> @llvm.aarch64.sve.umin.u.nxv4i32(<vscale x 4 x i1> %pg,
                                                                   <vscale x 4 x i32> %a,
-                                                                  <vscale x 4 x i32> %imm.splat)
+                                                                  <vscale x 4 x i32> splat(i32 5))
   ret <vscale x 4 x i32> %out
 }
 
@@ -1228,11 +1166,9 @@ define <vscale x 2 x i64> @umin_imm_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    umin z0.d, z0.d, #6
 ; CHECK-NEXT:    ret
-  %imm = insertelement <vscale x 2 x i64> undef, i64 6, i32 0
-  %imm.splat = shufflevector <vscale x 2 x i64> %imm, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i64> @llvm.aarch64.sve.umin.u.nxv2i64(<vscale x 2 x i1> %pg,
                                                                   <vscale x 2 x i64> %a,
-                                                                  <vscale x 2 x i64> %imm.splat)
+                                                                  <vscale x 2 x i64> splat(i64 6))
   ret <vscale x 2 x i64> %out
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-int-compares-with-imm.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-int-compares-with-imm.ll
index cefa3f9d825a33..bb72f0506690b3 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-int-compares-with-imm.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-int-compares-with-imm.ll
@@ -15,9 +15,7 @@ define <vscale x 16 x i1> @ir_cmpeq_b(<vscale x 16 x i8> %a) {
 ; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    cmpeq p0.b, p0/z, z0.b, #4
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 16 x i8> undef, i8 4, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
-  %out = icmp eq <vscale x 16 x i8> %a, %splat
+  %out = icmp eq <vscale x 16 x i8> %a, splat(i8 4)
   ret <vscale x 16 x i1> %out
 }
 
@@ -26,11 +24,9 @@ define <vscale x 16 x i1> @int_cmpeq_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmpeq p0.b, p0/z, z0.b, #4
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 16 x i8> undef, i8 4, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
   %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpeq.nxv16i8(<vscale x 16 x i1> %pg,
                                                                  <vscale x 16 x i8> %a,
-                                                                 <vscale x 16 x i8> %splat)
+                                                                 <vscale x 16 x i8> splat(i8 4))
   ret <vscale x 16 x i1> %out
 }
 
@@ -39,11 +35,9 @@ define <vscale x 16 x i1> @wide_cmpeq_b(<vscale x 16 x i1> %pg, <vscale x 16 x i
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmpeq p0.b, p0/z, z0.b, #4
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x i64> undef, i64 4, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpeq.wide.nxv16i8(<vscale x 16 x i1> %pg,
                                                                       <vscale x 16 x i8> %a,
-                                                                      <vscale x 2 x i64> %splat)
+                                                                      <vscale x 2 x i64> splat(i64 4))
   ret <vscale x 16 x i1> %out
 }
 
@@ -53,9 +47,7 @@ define <vscale x 8 x i1> @ir_cmpeq_h(<vscale x 8 x i16> %a) {
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    cmpeq p0.h, p0/z, z0.h, #-16
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x i16> undef, i16 -16, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %out = icmp eq <vscale x 8 x i16> %a, %splat
+  %out = icmp eq <vscale x 8 x i16> %a, splat(i16 -16)
   ret <vscale x 8 x i1> %out
 }
 
@@ -64,11 +56,9 @@ define <vscale x 8 x i1> @int_cmpeq_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmpeq p0.h, p0/z, z0.h, #-16
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x i16> undef, i16 -16, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpeq.nxv8i16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x i16> %a,
-                                                                <vscale x 8 x i16> %splat)
+                                                                <vscale x 8 x i16> splat(i16 -16))
   ret <vscale x 8 x i1> %out
 }
 
@@ -77,11 +67,9 @@ define <vscale x 8 x i1> @wide_cmpeq_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmpeq p0.h, p0/z, z0.h, #-16
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x i64> undef, i64 -16, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpeq.wide.nxv8i16(<vscale x 8 x i1> %pg,
                                                                      <vscale x 8 x i16> %a,
-                                                                     <vscale x 2 x i64> %splat)
+                                                                     <vscale x 2 x i64> splat(i64 -16))
   ret <vscale x 8 x i1> %out
 }
 
@@ -91,9 +79,7 @@ define <vscale x 4 x i1> @ir_cmpeq_s(<vscale x 4 x i32> %a) {
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    cmpeq p0.s, p0/z, z0.s, #15
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x i32> undef, i32 15, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %out = icmp eq <vscale x 4 x i32> %a, %splat
+  %out = icmp eq <vscale x 4 x i32> %a, splat(i32 15)
   ret <vscale x 4 x i1> %out
 }
 
@@ -102,11 +88,9 @@ define <vscale x 4 x i1> @int_cmpeq_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmpeq p0.s, p0/z, z0.s, #15
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x i32> undef, i32 15, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpeq.nxv4i32(<vscale x 4 x i1> %pg,
                                                                 <vscale x 4 x i32> %a,
-                                                                <vscale x 4 x i32> %splat)
+                                                                <vscale x 4 x i32> splat(i32 15))
   ret <vscale x 4 x i1> %out
 }
 
@@ -115,11 +99,9 @@ define <vscale x 4 x i1> @wide_cmpeq_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmpeq p0.s, p0/z, z0.s, #15
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x i64> undef, i64 15, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpeq.wide.nxv4i32(<vscale x 4 x i1> %pg,
                                                                      <vscale x 4 x i32> %a,
-                                                                     <vscale x 2 x i64> %splat)
+                                                                     <vscale x 2 x i64> splat(i64 15))
   ret <vscale x 4 x i1> %out
 }
 
@@ -129,9 +111,7 @@ define <vscale x 2 x i1> @ir_cmpeq_d(<vscale x 2 x i64> %a) {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    cmpeq p0.d, p0/z, z0.d, #0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x i64> undef, i64 0, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %out = icmp eq <vscale x 2 x i64> %a, %splat
+  %out = icmp eq <vscale x 2 x i64> %a, zeroinitializer
   ret <vscale x 2 x i1> %out
 }
 
@@ -140,11 +120,9 @@ define <vscale x 2 x i1> @int_cmpeq_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmpeq p0.d, p0/z, z0.d, #0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x i64> undef, i64 0, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpeq.nxv2i64(<vscale x 2 x i1> %pg,
                                                                 <vscale x 2 x i64> %a,
-                                                                <vscale x 2 x i64> %splat)
+                                                                <vscale x 2 x i64> zeroinitializer)
   ret <vscale x 2 x i1> %out
 }
 
@@ -158,9 +136,7 @@ define <vscale x 16 x i1> @ir_cmpge_b(<vscale x 16 x i8> %a) {
 ; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    cmpge p0.b, p0/z, z0.b, #4
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 16 x i8> undef, i8 4, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
-  %out = icmp sge <vscale x 16 x i8> %a, %splat
+  %out = icmp sge <vscale x 16 x i8> %a, splat(i8 4)
   ret <vscale x 16 x i1> %out
 }
 
@@ -169,11 +145,9 @@ define <vscale x 16 x i1> @int_cmpge_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmpge p0.b, p0/z, z0.b, #4
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 16 x i8> undef, i8 4, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
   %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpge.nxv16i8(<vscale x 16 x i1> %pg,
                                                                  <vscale x 16 x i8> %a,
-                                                                 <vscale x 16 x i8> %splat)
+                                                                 <vscale x 16 x i8> splat(i8 4))
   ret <vscale x 16 x i1> %out
 }
 
@@ -182,11 +156,9 @@ define <vscale x 16 x i1> @wide_cmpge_b(<vscale x 16 x i1> %pg, <vscale x 16 x i
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmpge p0.b, p0/z, z0.b, #4
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x i64> undef, i64 4, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpge.wide.nxv16i8(<vscale x 16 x i1> %pg,
                                                                       <vscale x 16 x i8> %a,
-                                                                      <vscale x 2 x i64> %splat)
+                                                                      <vscale x 2 x i64> splat(i64 4))
   ret <vscale x 16 x i1> %out
 }
 
@@ -196,9 +168,7 @@ define <vscale x 8 x i1> @ir_cmpge_h(<vscale x 8 x i16> %a) {
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    cmpge p0.h, p0/z, z0.h, #-16
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x i16> undef, i16 -16, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %out = icmp sge <vscale x 8 x i16> %a, %splat
+  %out = icmp sge <vscale x 8 x i16> %a, splat(i16 -16)
   ret <vscale x 8 x i1> %out
 }
 
@@ -207,11 +177,9 @@ define <vscale x 8 x i1> @int_cmpge_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmpge p0.h, p0/z, z0.h, #-16
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x i16> undef, i16 -16, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpge.nxv8i16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x i16> %a,
-                                                                <vscale x 8 x i16> %splat)
+                                                                <vscale x 8 x i16> splat(i16 -16))
   ret <vscale x 8 x i1> %out
 }
 
@@ -220,11 +188,9 @@ define <vscale x 8 x i1> @wide_cmpge_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmpge p0.h, p0/z, z0.h, #-16
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x i64> undef, i64 -16, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpge.wide.nxv8i16(<vscale x 8 x i1> %pg,
                                                                      <vscale x 8 x i16> %a,
-                                                                     <vscale x 2 x i64> %splat)
+                                                                     <vscale x 2 x i64> splat(i64 -16))
   ret <vscale x 8 x i1> %out
 }
 
@@ -234,9 +200,7 @@ define <vscale x 4 x i1> @ir_cmpge_s(<vscale x 4 x i32> %a) {
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    cmpge p0.s, p0/z, z0.s, #15
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x i32> undef, i32 15, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %out = icmp sge <vscale x 4 x i32> %a, %splat
+  %out = icmp sge <vscale x 4 x i32> %a, splat(i32 15)
   ret <vscale x 4 x i1> %out
 }
 
@@ -245,11 +209,9 @@ define <vscale x 4 x i1> @int_cmpge_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmpge p0.s, p0/z, z0.s, #15
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x i32> undef, i32 15, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpge.nxv4i32(<vscale x 4 x i1> %pg,
                                                                 <vscale x 4 x i32> %a,
-                                                                <vscale x 4 x i32> %splat)
+                                                                <vscale x 4 x i32> splat(i32 15))
   ret <vscale x 4 x i1> %out
 }
 
@@ -258,11 +220,9 @@ define <vscale x 4 x i1> @wide_cmpge_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmpge p0.s, p0/z, z0.s, #15
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x i64> undef, i64 15, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpge.wide.nxv4i32(<vscale x 4 x i1> %pg,
                                                                      <vscale x 4 x i32> %a,
-                                                                     <vscale x 2 x i64> %splat)
+                                                                     <vscale x 2 x i64> splat(i64 15))
   ret <vscale x 4 x i1> %out
 }
 
@@ -272,9 +232,7 @@ define <vscale x 2 x i1> @ir_cmpge_d(<vscale x 2 x i64> %a) {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    cmpge p0.d, p0/z, z0.d, #0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x i64> undef, i64 0, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %out = icmp sge <vscale x 2 x i64> %a, %splat
+  %out = icmp sge <vscale x 2 x i64> %a, zeroinitializer
   ret <vscale x 2 x i1> %out
 }
 
@@ -283,11 +241,9 @@ define <vscale x 2 x i1> @int_cmpge_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmpge p0.d, p0/z, z0.d, #0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x i64> undef, i64 0, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpge.nxv2i64(<vscale x 2 x i1> %pg,
                                                                 <vscale x 2 x i64> %a,
-                                                                <vscale x 2 x i64> %splat)
+                                                                <vscale x 2 x i64> zeroinitializer)
   ret <vscale x 2 x i1> %out
 }
 
@@ -301,9 +257,7 @@ define <vscale x 16 x i1> @ir_cmpgt_b(<vscale x 16 x i8> %a) {
 ; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    cmpgt p0.b, p0/z, z0.b, #4
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 16 x i8> undef, i8 4, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
-  %out = icmp sgt <vscale x 16 x i8> %a, %splat
+  %out = icmp sgt <vscale x 16 x i8> %a, splat(i8 4)
   ret <vscale x 16 x i1> %out
 }
 
@@ -312,11 +266,9 @@ define <vscale x 16 x i1> @int_cmpgt_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmpgt p0.b, p0/z, z0.b, #4
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 16 x i8> undef, i8 4, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
   %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpgt.nxv16i8(<vscale x 16 x i1> %pg,
                                                                  <vscale x 16 x i8> %a,
-                                                                 <vscale x 16 x i8> %splat)
+                                                                 <vscale x 16 x i8> splat(i8 4))
   ret <vscale x 16 x i1> %out
 }
 
@@ -325,11 +277,9 @@ define <vscale x 16 x i1> @wide_cmpgt_b(<vscale x 16 x i1> %pg, <vscale x 16 x i
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmpgt p0.b, p0/z, z0.b, #4
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x i64> undef, i64 4, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpgt.wide.nxv16i8(<vscale x 16 x i1> %pg,
                                                                       <vscale x 16 x i8> %a,
-                                                                      <vscale x 2 x i64> %splat)
+                                                                      <vscale x 2 x i64> splat(i64 4))
   ret <vscale x 16 x i1> %out
 }
 
@@ -339,9 +289,7 @@ define <vscale x 8 x i1> @ir_cmpgt_h(<vscale x 8 x i16> %a) {
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    cmpgt p0.h, p0/z, z0.h, #-16
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x i16> undef, i16 -16, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %out = icmp sgt <vscale x 8 x i16> %a, %splat
+  %out = icmp sgt <vscale x 8 x i16> %a, splat(i16 -16)
   ret <vscale x 8 x i1> %out
 }
 
@@ -350,11 +298,9 @@ define <vscale x 8 x i1> @int_cmpgt_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmpgt p0.h, p0/z, z0.h, #-16
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x i16> undef, i16 -16, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpgt.nxv8i16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x i16> %a,
-                                                                <vscale x 8 x i16> %splat)
+                                                                <vscale x 8 x i16> splat(i16 -16))
   ret <vscale x 8 x i1> %out
 }
 
@@ -363,11 +309,9 @@ define <vscale x 8 x i1> @wide_cmpgt_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmpgt p0.h, p0/z, z0.h, #-16
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x i64> undef, i64 -16, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpgt.wide.nxv8i16(<vscale x 8 x i1> %pg,
                                                                      <vscale x 8 x i16> %a,
-                                                                     <vscale x 2 x i64> %splat)
+                                                                     <vscale x 2 x i64> splat(i64 -16))
   ret <vscale x 8 x i1> %out
 }
 
@@ -377,9 +321,7 @@ define <vscale x 4 x i1> @ir_cmpgt_s(<vscale x 4 x i32> %a) {
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    cmpgt p0.s, p0/z, z0.s, #15
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x i32> undef, i32 15, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %out = icmp sgt <vscale x 4 x i32> %a, %splat
+  %out = icmp sgt <vscale x 4 x i32> %a, splat(i32 15)
   ret <vscale x 4 x i1> %out
 }
 
@@ -388,11 +330,9 @@ define <vscale x 4 x i1> @int_cmpgt_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmpgt p0.s, p0/z, z0.s, #15
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x i32> undef, i32 15, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpgt.nxv4i32(<vscale x 4 x i1> %pg,
                                                                 <vscale x 4 x i32> %a,
-                                                                <vscale x 4 x i32> %splat)
+                                                                <vscale x 4 x i32> splat(i32 15))
   ret <vscale x 4 x i1> %out
 }
 
@@ -401,11 +341,9 @@ define <vscale x 4 x i1> @wide_cmpgt_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmpgt p0.s, p0/z, z0.s, #15
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x i64> undef, i64 15, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpgt.wide.nxv4i32(<vscale x 4 x i1> %pg,
                                                                      <vscale x 4 x i32> %a,
-                                                                     <vscale x 2 x i64> %splat)
+                                                                     <vscale x 2 x i64> splat(i64 15))
   ret <vscale x 4 x i1> %out
 }
 
@@ -415,9 +353,7 @@ define <vscale x 2 x i1> @ir_cmpgt_d(<vscale x 2 x i64> %a) {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    cmpgt p0.d, p0/z, z0.d, #0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x i64> undef, i64 0, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %out = icmp sgt <vscale x 2 x i64> %a, %splat
+  %out = icmp sgt <vscale x 2 x i64> %a, zeroinitializer
   ret <vscale x 2 x i1> %out
 }
 
@@ -426,11 +362,9 @@ define <vscale x 2 x i1> @int_cmpgt_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmpgt p0.d, p0/z, z0.d, #0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x i64> undef, i64 0, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpgt.nxv2i64(<vscale x 2 x i1> %pg,
                                                                 <vscale x 2 x i64> %a,
-                                                                <vscale x 2 x i64> %splat)
+                                                                <vscale x 2 x i64> zeroinitializer)
   ret <vscale x 2 x i1> %out
 }
 
@@ -444,9 +378,7 @@ define <vscale x 16 x i1> @ir_cmple_b(<vscale x 16 x i8> %a) {
 ; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    cmple p0.b, p0/z, z0.b, #4
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 16 x i8> undef, i8 4, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
-  %out = icmp sle <vscale x 16 x i8> %a, %splat
+  %out = icmp sle <vscale x 16 x i8> %a, splat(i8 4)
   ret <vscale x 16 x i1> %out
 }
 
@@ -455,10 +387,8 @@ define <vscale x 16 x i1> @int_cmple_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmple p0.b, p0/z, z0.b, #4
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 16 x i8> undef, i8 4, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
   %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpge.nxv16i8(<vscale x 16 x i1> %pg,
-                                                                 <vscale x 16 x i8> %splat,
+                                                                 <vscale x 16 x i8> splat(i8 4),
                                                                  <vscale x 16 x i8> %a)
   ret <vscale x 16 x i1> %out
 }
@@ -468,11 +398,9 @@ define <vscale x 16 x i1> @wide_cmple_b(<vscale x 16 x i1> %pg, <vscale x 16 x i
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmple p0.b, p0/z, z0.b, #4
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x i64> undef, i64 4, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmple.wide.nxv16i8(<vscale x 16 x i1> %pg,
                                                                       <vscale x 16 x i8> %a,
-                                                                      <vscale x 2 x i64> %splat)
+                                                                      <vscale x 2 x i64> splat(i64 4))
   ret <vscale x 16 x i1> %out
 }
 
@@ -482,9 +410,7 @@ define <vscale x 8 x i1> @ir_cmple_h(<vscale x 8 x i16> %a) {
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    cmple p0.h, p0/z, z0.h, #-16
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x i16> undef, i16 -16, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %out = icmp sle <vscale x 8 x i16> %a, %splat
+  %out = icmp sle <vscale x 8 x i16> %a, splat(i16 -16)
   ret <vscale x 8 x i1> %out
 }
 
@@ -493,10 +419,8 @@ define <vscale x 8 x i1> @int_cmple_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmple p0.h, p0/z, z0.h, #-16
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x i16> undef, i16 -16, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpge.nxv8i16(<vscale x 8 x i1> %pg,
-                                                                <vscale x 8 x i16> %splat,
+                                                                <vscale x 8 x i16> splat(i16 -16),
                                                                 <vscale x 8 x i16> %a)
   ret <vscale x 8 x i1> %out
 }
@@ -506,11 +430,9 @@ define <vscale x 8 x i1> @wide_cmple_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmple p0.h, p0/z, z0.h, #-16
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x i64> undef, i64 -16, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmple.wide.nxv8i16(<vscale x 8 x i1> %pg,
                                                                      <vscale x 8 x i16> %a,
-                                                                     <vscale x 2 x i64> %splat)
+                                                                     <vscale x 2 x i64> splat(i64 -16))
   ret <vscale x 8 x i1> %out
 }
 
@@ -520,9 +442,7 @@ define <vscale x 4 x i1> @ir_cmple_s(<vscale x 4 x i32> %a) {
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    cmple p0.s, p0/z, z0.s, #15
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x i32> undef, i32 15, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %out = icmp sle <vscale x 4 x i32> %a, %splat
+  %out = icmp sle <vscale x 4 x i32> %a, splat(i32 15)
   ret <vscale x 4 x i1> %out
 }
 
@@ -531,10 +451,8 @@ define <vscale x 4 x i1> @int_cmple_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmple p0.s, p0/z, z0.s, #15
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x i32> undef, i32 15, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpge.nxv4i32(<vscale x 4 x i1> %pg,
-                                                                <vscale x 4 x i32> %splat,
+                                                                <vscale x 4 x i32> splat(i32 15),
                                                                 <vscale x 4 x i32> %a)
   ret <vscale x 4 x i1> %out
 }
@@ -544,11 +462,9 @@ define <vscale x 4 x i1> @wide_cmple_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmple p0.s, p0/z, z0.s, #15
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x i64> undef, i64 15, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmple.wide.nxv4i32(<vscale x 4 x i1> %pg,
                                                                      <vscale x 4 x i32> %a,
-                                                                     <vscale x 2 x i64> %splat)
+                                                                     <vscale x 2 x i64> splat(i64 15))
   ret <vscale x 4 x i1> %out
 }
 
@@ -558,9 +474,7 @@ define <vscale x 2 x i1> @ir_cmple_d(<vscale x 2 x i64> %a) {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    cmple p0.d, p0/z, z0.d, #0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x i64> undef, i64 0, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %out = icmp sle <vscale x 2 x i64> %a, %splat
+  %out = icmp sle <vscale x 2 x i64> %a, zeroinitializer
   ret <vscale x 2 x i1> %out
 }
 
@@ -569,10 +483,8 @@ define <vscale x 2 x i1> @int_cmple_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmple p0.d, p0/z, z0.d, #0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x i64> undef, i64 0, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpge.nxv2i64(<vscale x 2 x i1> %pg,
-                                                                <vscale x 2 x i64> %splat,
+                                                                <vscale x 2 x i64> zeroinitializer,
                                                                 <vscale x 2 x i64> %a)
   ret <vscale x 2 x i1> %out
 }
@@ -587,9 +499,7 @@ define <vscale x 16 x i1> @ir_cmplt_b(<vscale x 16 x i8> %a) {
 ; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    cmplt p0.b, p0/z, z0.b, #4
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 16 x i8> undef, i8 4, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
-  %out = icmp slt <vscale x 16 x i8> %a, %splat
+  %out = icmp slt <vscale x 16 x i8> %a, splat(i8 4)
   ret <vscale x 16 x i1> %out
 }
 
@@ -598,10 +508,8 @@ define <vscale x 16 x i1> @int_cmplt_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmplt p0.b, p0/z, z0.b, #4
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 16 x i8> undef, i8 4, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
   %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpgt.nxv16i8(<vscale x 16 x i1> %pg,
-                                                                 <vscale x 16 x i8> %splat,
+                                                                 <vscale x 16 x i8> splat(i8 4),
                                                                  <vscale x 16 x i8> %a)
   ret <vscale x 16 x i1> %out
 }
@@ -611,11 +519,9 @@ define <vscale x 16 x i1> @wide_cmplt_b(<vscale x 16 x i1> %pg, <vscale x 16 x i
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmplt p0.b, p0/z, z0.b, #4
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x i64> undef, i64 4, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmplt.wide.nxv16i8(<vscale x 16 x i1> %pg,
                                                                       <vscale x 16 x i8> %a,
-                                                                      <vscale x 2 x i64> %splat)
+                                                                      <vscale x 2 x i64> splat(i64 4))
   ret <vscale x 16 x i1> %out
 }
 
@@ -625,9 +531,7 @@ define <vscale x 8 x i1> @ir_cmplt_h(<vscale x 8 x i16> %a) {
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    cmplt p0.h, p0/z, z0.h, #-16
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x i16> undef, i16 -16, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %out = icmp slt <vscale x 8 x i16> %a, %splat
+  %out = icmp slt <vscale x 8 x i16> %a, splat(i16 -16)
   ret <vscale x 8 x i1> %out
 }
 
@@ -636,10 +540,8 @@ define <vscale x 8 x i1> @int_cmplt_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmplt p0.h, p0/z, z0.h, #-16
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x i16> undef, i16 -16, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpgt.nxv8i16(<vscale x 8 x i1> %pg,
-                                                                <vscale x 8 x i16> %splat,
+                                                                <vscale x 8 x i16> splat(i16 -16),
                                                                 <vscale x 8 x i16> %a)
   ret <vscale x 8 x i1> %out
 }
@@ -649,11 +551,9 @@ define <vscale x 8 x i1> @wide_cmplt_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmplt p0.h, p0/z, z0.h, #-16
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x i64> undef, i64 -16, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmplt.wide.nxv8i16(<vscale x 8 x i1> %pg,
                                                                      <vscale x 8 x i16> %a,
-                                                                     <vscale x 2 x i64> %splat)
+                                                                     <vscale x 2 x i64> splat(i64 -16))
   ret <vscale x 8 x i1> %out
 }
 
@@ -663,9 +563,7 @@ define <vscale x 4 x i1> @ir_cmplt_s(<vscale x 4 x i32> %a) {
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    cmplt p0.s, p0/z, z0.s, #15
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x i32> undef, i32 15, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %out = icmp slt <vscale x 4 x i32> %a, %splat
+  %out = icmp slt <vscale x 4 x i32> %a, splat(i32 15)
   ret <vscale x 4 x i1> %out
 }
 
@@ -674,10 +572,8 @@ define <vscale x 4 x i1> @int_cmplt_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmplt p0.s, p0/z, z0.s, #15
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x i32> undef, i32 15, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpgt.nxv4i32(<vscale x 4 x i1> %pg,
-                                                                <vscale x 4 x i32> %splat,
+                                                                <vscale x 4 x i32> splat(i32 15),
                                                                 <vscale x 4 x i32> %a)
   ret <vscale x 4 x i1> %out
 }
@@ -687,11 +583,9 @@ define <vscale x 4 x i1> @wide_cmplt_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmplt p0.s, p0/z, z0.s, #15
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x i64> undef, i64 15, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmplt.wide.nxv4i32(<vscale x 4 x i1> %pg,
                                                                      <vscale x 4 x i32> %a,
-                                                                     <vscale x 2 x i64> %splat)
+                                                                     <vscale x 2 x i64> splat(i64 15))
   ret <vscale x 4 x i1> %out
 }
 
@@ -701,9 +595,7 @@ define <vscale x 2 x i1> @ir_cmplt_d(<vscale x 2 x i64> %a) {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    cmplt p0.d, p0/z, z0.d, #0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x i64> undef, i64 0, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %out = icmp slt <vscale x 2 x i64> %a, %splat
+  %out = icmp slt <vscale x 2 x i64> %a, zeroinitializer
   ret <vscale x 2 x i1> %out
 }
 
@@ -712,10 +604,8 @@ define <vscale x 2 x i1> @int_cmplt_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmplt p0.d, p0/z, z0.d, #0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x i64> undef, i64 0, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpgt.nxv2i64(<vscale x 2 x i1> %pg,
-                                                                <vscale x 2 x i64> %splat,
+                                                                <vscale x 2 x i64> zeroinitializer,
                                                                 <vscale x 2 x i64> %a)
   ret <vscale x 2 x i1> %out
 }
@@ -730,9 +620,7 @@ define <vscale x 16 x i1> @ir_cmpne_b(<vscale x 16 x i8> %a) {
 ; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    cmpne p0.b, p0/z, z0.b, #4
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 16 x i8> undef, i8 4, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
-  %out = icmp ne <vscale x 16 x i8> %a, %splat
+  %out = icmp ne <vscale x 16 x i8> %a, splat(i8 4)
   ret <vscale x 16 x i1> %out
 }
 
@@ -741,11 +629,9 @@ define <vscale x 16 x i1> @int_cmpne_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmpne p0.b, p0/z, z0.b, #4
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 16 x i8> undef, i8 4, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
   %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpne.nxv16i8(<vscale x 16 x i1> %pg,
                                                                  <vscale x 16 x i8> %a,
-                                                                 <vscale x 16 x i8> %splat)
+                                                                 <vscale x 16 x i8> splat(i8 4))
   ret <vscale x 16 x i1> %out
 }
 
@@ -754,11 +640,9 @@ define <vscale x 16 x i1> @wide_cmpne_b(<vscale x 16 x i1> %pg, <vscale x 16 x i
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmpne p0.b, p0/z, z0.b, #4
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x i64> undef, i64 4, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpne.wide.nxv16i8(<vscale x 16 x i1> %pg,
                                                                       <vscale x 16 x i8> %a,
-                                                                      <vscale x 2 x i64> %splat)
+                                                                      <vscale x 2 x i64> splat(i64 4))
   ret <vscale x 16 x i1> %out
 }
 
@@ -768,9 +652,7 @@ define <vscale x 8 x i1> @ir_cmpne_h(<vscale x 8 x i16> %a) {
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    cmpne p0.h, p0/z, z0.h, #-16
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x i16> undef, i16 -16, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %out = icmp ne <vscale x 8 x i16> %a, %splat
+  %out = icmp ne <vscale x 8 x i16> %a, splat(i16 -16)
   ret <vscale x 8 x i1> %out
 }
 
@@ -779,11 +661,9 @@ define <vscale x 8 x i1> @int_cmpne_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmpne p0.h, p0/z, z0.h, #-16
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x i16> undef, i16 -16, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpne.nxv8i16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x i16> %a,
-                                                                <vscale x 8 x i16> %splat)
+                                                                <vscale x 8 x i16> splat(i16 -16))
   ret <vscale x 8 x i1> %out
 }
 
@@ -792,11 +672,9 @@ define <vscale x 8 x i1> @wide_cmpne_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmpne p0.h, p0/z, z0.h, #-16
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x i64> undef, i64 -16, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpne.wide.nxv8i16(<vscale x 8 x i1> %pg,
                                                                      <vscale x 8 x i16> %a,
-                                                                     <vscale x 2 x i64> %splat)
+                                                                     <vscale x 2 x i64> splat(i64 -16))
   ret <vscale x 8 x i1> %out
 }
 
@@ -806,9 +684,7 @@ define <vscale x 4 x i1> @ir_cmpne_s(<vscale x 4 x i32> %a) {
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    cmpne p0.s, p0/z, z0.s, #15
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x i32> undef, i32 15, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %out = icmp ne <vscale x 4 x i32> %a, %splat
+  %out = icmp ne <vscale x 4 x i32> %a, splat(i32 15)
   ret <vscale x 4 x i1> %out
 }
 
@@ -817,11 +693,9 @@ define <vscale x 4 x i1> @int_cmpne_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmpne p0.s, p0/z, z0.s, #15
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x i32> undef, i32 15, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.nxv4i32(<vscale x 4 x i1> %pg,
                                                                 <vscale x 4 x i32> %a,
-                                                                <vscale x 4 x i32> %splat)
+                                                                <vscale x 4 x i32> splat(i32 15))
   ret <vscale x 4 x i1> %out
 }
 
@@ -830,11 +704,9 @@ define <vscale x 4 x i1> @wide_cmpne_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmpne p0.s, p0/z, z0.s, #15
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x i64> undef, i64 15, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.wide.nxv4i32(<vscale x 4 x i1> %pg,
                                                                      <vscale x 4 x i32> %a,
-                                                                     <vscale x 2 x i64> %splat)
+                                                                     <vscale x 2 x i64> splat(i64 15))
   ret <vscale x 4 x i1> %out
 }
 
@@ -844,9 +716,7 @@ define <vscale x 2 x i1> @ir_cmpne_d(<vscale x 2 x i64> %a) {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    cmpne p0.d, p0/z, z0.d, #0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x i64> undef, i64 0, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %out = icmp ne <vscale x 2 x i64> %a, %splat
+  %out = icmp ne <vscale x 2 x i64> %a, zeroinitializer
   ret <vscale x 2 x i1> %out
 }
 
@@ -855,11 +725,9 @@ define <vscale x 2 x i1> @int_cmpne_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmpne p0.d, p0/z, z0.d, #0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x i64> undef, i64 0, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpne.nxv2i64(<vscale x 2 x i1> %pg,
                                                                 <vscale x 2 x i64> %a,
-                                                                <vscale x 2 x i64> %splat)
+                                                                <vscale x 2 x i64> zeroinitializer)
   ret <vscale x 2 x i1> %out
 }
 
@@ -877,9 +745,7 @@ define <vscale x 16 x i1> @ir_cmphi_b(<vscale x 16 x i8> %a) {
 ; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    cmphi p0.b, p0/z, z0.b, #4
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 16 x i8> undef, i8 4, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
-  %out = icmp ugt <vscale x 16 x i8> %a, %splat
+  %out = icmp ugt <vscale x 16 x i8> %a, splat(i8 4)
   ret <vscale x 16 x i1> %out
 }
 
@@ -888,11 +754,9 @@ define <vscale x 16 x i1> @int_cmphi_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmphi p0.b, p0/z, z0.b, #4
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 16 x i8> undef, i8 4, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
   %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmphi.nxv16i8(<vscale x 16 x i1> %pg,
                                                                  <vscale x 16 x i8> %a,
-                                                                 <vscale x 16 x i8> %splat)
+                                                                 <vscale x 16 x i8> splat(i8 4))
   ret <vscale x 16 x i1> %out
 }
 
@@ -901,11 +765,9 @@ define <vscale x 16 x i1> @wide_cmphi_b(<vscale x 16 x i1> %pg, <vscale x 16 x i
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmphi p0.b, p0/z, z0.b, #4
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x i64> undef, i64 4, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmphi.wide.nxv16i8(<vscale x 16 x i1> %pg,
                                                                       <vscale x 16 x i8> %a,
-                                                                      <vscale x 2 x i64> %splat)
+                                                                      <vscale x 2 x i64> splat(i64 4))
   ret <vscale x 16 x i1> %out
 }
 
@@ -915,9 +777,7 @@ define <vscale x 8 x i1> @ir_cmphi_h(<vscale x 8 x i16> %a) {
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    cmpne p0.h, p0/z, z0.h, #0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x i16> undef, i16 0, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %out = icmp ugt <vscale x 8 x i16> %a, %splat
+  %out = icmp ugt <vscale x 8 x i16> %a, zeroinitializer
   ret <vscale x 8 x i1> %out
 }
 
@@ -926,11 +786,9 @@ define <vscale x 8 x i1> @int_cmphi_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmphi p0.h, p0/z, z0.h, #0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x i16> undef, i16 0, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmphi.nxv8i16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x i16> %a,
-                                                                <vscale x 8 x i16> %splat)
+                                                                <vscale x 8 x i16> zeroinitializer)
   ret <vscale x 8 x i1> %out
 }
 
@@ -939,11 +797,9 @@ define <vscale x 8 x i1> @wide_cmphi_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmphi p0.h, p0/z, z0.h, #0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x i64> undef, i64 0, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmphi.wide.nxv8i16(<vscale x 8 x i1> %pg,
                                                                      <vscale x 8 x i16> %a,
-                                                                     <vscale x 2 x i64> %splat)
+                                                                     <vscale x 2 x i64> zeroinitializer)
   ret <vscale x 8 x i1> %out
 }
 
@@ -953,9 +809,7 @@ define <vscale x 4 x i1> @ir_cmphi_s(<vscale x 4 x i32> %a) {
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    cmphi p0.s, p0/z, z0.s, #68
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x i32> undef, i32 68, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %out = icmp ugt <vscale x 4 x i32> %a, %splat
+  %out = icmp ugt <vscale x 4 x i32> %a, splat(i32 68)
   ret <vscale x 4 x i1> %out
 }
 
@@ -964,11 +818,9 @@ define <vscale x 4 x i1> @int_cmphi_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmphi p0.s, p0/z, z0.s, #68
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x i32> undef, i32 68, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmphi.nxv4i32(<vscale x 4 x i1> %pg,
                                                                 <vscale x 4 x i32> %a,
-                                                                <vscale x 4 x i32> %splat)
+                                                                <vscale x 4 x i32> splat(i32 68))
   ret <vscale x 4 x i1> %out
 }
 
@@ -977,11 +829,9 @@ define <vscale x 4 x i1> @wide_cmphi_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmphi p0.s, p0/z, z0.s, #68
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x i64> undef, i64 68, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmphi.wide.nxv4i32(<vscale x 4 x i1> %pg,
                                                                      <vscale x 4 x i32> %a,
-                                                                     <vscale x 2 x i64> %splat)
+                                                                     <vscale x 2 x i64> splat(i64 68))
   ret <vscale x 4 x i1> %out
 }
 
@@ -991,9 +841,7 @@ define <vscale x 2 x i1> @ir_cmphi_d(<vscale x 2 x i64> %a) {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    cmphi p0.d, p0/z, z0.d, #127
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x i64> undef, i64 127, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %out = icmp ugt <vscale x 2 x i64> %a, %splat
+  %out = icmp ugt <vscale x 2 x i64> %a, splat(i64 127)
   ret <vscale x 2 x i1> %out
 }
 
@@ -1002,11 +850,9 @@ define <vscale x 2 x i1> @int_cmphi_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmphi p0.d, p0/z, z0.d, #127
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x i64> undef, i64 127, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i1> @llvm.aarch64.sve.cmphi.nxv2i64(<vscale x 2 x i1> %pg,
                                                                 <vscale x 2 x i64> %a,
-                                                                <vscale x 2 x i64> %splat)
+                                                                <vscale x 2 x i64> splat(i64 127))
   ret <vscale x 2 x i1> %out
 }
 
@@ -1020,9 +866,7 @@ define <vscale x 16 x i1> @ir_cmphs_b(<vscale x 16 x i8> %a) {
 ; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    cmphs p0.b, p0/z, z0.b, #4
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 16 x i8> undef, i8 4, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
-  %out = icmp uge <vscale x 16 x i8> %a, %splat
+  %out = icmp uge <vscale x 16 x i8> %a, splat(i8 4)
   ret <vscale x 16 x i1> %out
 }
 
@@ -1031,11 +875,9 @@ define <vscale x 16 x i1> @int_cmphs_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmphs p0.b, p0/z, z0.b, #4
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 16 x i8> undef, i8 4, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
   %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmphs.nxv16i8(<vscale x 16 x i1> %pg,
                                                                  <vscale x 16 x i8> %a,
-                                                                 <vscale x 16 x i8> %splat)
+                                                                 <vscale x 16 x i8> splat(i8 4))
   ret <vscale x 16 x i1> %out
 }
 
@@ -1044,11 +886,9 @@ define <vscale x 16 x i1> @wide_cmphs_b(<vscale x 16 x i1> %pg, <vscale x 16 x i
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmphs p0.b, p0/z, z0.b, #4
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x i64> undef, i64 4, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmphs.wide.nxv16i8(<vscale x 16 x i1> %pg,
                                                                       <vscale x 16 x i8> %a,
-                                                                      <vscale x 2 x i64> %splat)
+                                                                      <vscale x 2 x i64> splat(i64 4))
   ret <vscale x 16 x i1> %out
 }
 
@@ -1057,9 +897,7 @@ define <vscale x 8 x i1> @ir_cmphs_h(<vscale x 8 x i16> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x i16> undef, i16 0, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %out = icmp uge <vscale x 8 x i16> %a, %splat
+  %out = icmp uge <vscale x 8 x i16> %a, zeroinitializer
   ret <vscale x 8 x i1> %out
 }
 
@@ -1068,11 +906,9 @@ define <vscale x 8 x i1> @int_cmphs_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmphs p0.h, p0/z, z0.h, #0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x i16> undef, i16 0, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmphs.nxv8i16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x i16> %a,
-                                                                <vscale x 8 x i16> %splat)
+                                                                <vscale x 8 x i16> zeroinitializer)
   ret <vscale x 8 x i1> %out
 }
 
@@ -1081,11 +917,9 @@ define <vscale x 8 x i1> @wide_cmphs_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmphs p0.h, p0/z, z0.h, #0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x i64> undef, i64 0, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmphs.wide.nxv8i16(<vscale x 8 x i1> %pg,
                                                                      <vscale x 8 x i16> %a,
-                                                                     <vscale x 2 x i64> %splat)
+                                                                     <vscale x 2 x i64> zeroinitializer)
   ret <vscale x 8 x i1> %out
 }
 
@@ -1095,9 +929,7 @@ define <vscale x 4 x i1> @ir_cmphs_s(<vscale x 4 x i32> %a) {
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    cmphs p0.s, p0/z, z0.s, #68
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x i32> undef, i32 68, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %out = icmp uge <vscale x 4 x i32> %a, %splat
+  %out = icmp uge <vscale x 4 x i32> %a, splat(i32 68)
   ret <vscale x 4 x i1> %out
 }
 
@@ -1106,11 +938,9 @@ define <vscale x 4 x i1> @int_cmphs_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmphs p0.s, p0/z, z0.s, #68
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x i32> undef, i32 68, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmphs.nxv4i32(<vscale x 4 x i1> %pg,
                                                                 <vscale x 4 x i32> %a,
-                                                                <vscale x 4 x i32> %splat)
+                                                                <vscale x 4 x i32> splat(i32 68))
   ret <vscale x 4 x i1> %out
 }
 
@@ -1119,11 +949,9 @@ define <vscale x 4 x i1> @wide_cmphs_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmphs p0.s, p0/z, z0.s, #68
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x i64> undef, i64 68, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmphs.wide.nxv4i32(<vscale x 4 x i1> %pg,
                                                                      <vscale x 4 x i32> %a,
-                                                                     <vscale x 2 x i64> %splat)
+                                                                     <vscale x 2 x i64> splat(i64 68))
   ret <vscale x 4 x i1> %out
 }
 
@@ -1133,9 +961,7 @@ define <vscale x 2 x i1> @ir_cmphs_d(<vscale x 2 x i64> %a) {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    cmphs p0.d, p0/z, z0.d, #127
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x i64> undef, i64 127, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %out = icmp uge <vscale x 2 x i64> %a, %splat
+  %out = icmp uge <vscale x 2 x i64> %a, splat(i64 127)
   ret <vscale x 2 x i1> %out
 }
 
@@ -1144,11 +970,9 @@ define <vscale x 2 x i1> @int_cmphs_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmphs p0.d, p0/z, z0.d, #127
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x i64> undef, i64 127, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i1> @llvm.aarch64.sve.cmphs.nxv2i64(<vscale x 2 x i1> %pg,
                                                                 <vscale x 2 x i64> %a,
-                                                                <vscale x 2 x i64> %splat)
+                                                                <vscale x 2 x i64> splat(i64 127))
   ret <vscale x 2 x i1> %out
 }
 
@@ -1162,9 +986,7 @@ define <vscale x 16 x i1> @ir_cmplo_b(<vscale x 16 x i8> %a) {
 ; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    cmplo p0.b, p0/z, z0.b, #4
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 16 x i8> undef, i8 4, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
-  %out = icmp ult <vscale x 16 x i8> %a, %splat
+  %out = icmp ult <vscale x 16 x i8> %a, splat(i8 4)
   ret <vscale x 16 x i1> %out
 }
 
@@ -1173,10 +995,8 @@ define <vscale x 16 x i1> @int_cmplo_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmplo p0.b, p0/z, z0.b, #4
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 16 x i8> undef, i8 4, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
   %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmphi.nxv16i8(<vscale x 16 x i1> %pg,
-                                                                 <vscale x 16 x i8> %splat,
+                                                                 <vscale x 16 x i8> splat(i8 4),
                                                                  <vscale x 16 x i8> %a)
   ret <vscale x 16 x i1> %out
 }
@@ -1186,11 +1006,9 @@ define <vscale x 16 x i1> @wide_cmplo_b(<vscale x 16 x i1> %pg, <vscale x 16 x i
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmplo p0.b, p0/z, z0.b, #4
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x i64> undef, i64 4, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmplo.wide.nxv16i8(<vscale x 16 x i1> %pg,
                                                                       <vscale x 16 x i8> %a,
-                                                                      <vscale x 2 x i64> %splat)
+                                                                      <vscale x 2 x i64> splat(i64 4))
   ret <vscale x 16 x i1> %out
 }
 
@@ -1200,9 +1018,7 @@ define <vscale x 8 x i1> @ir_cmplo_h(<vscale x 8 x i16> %a) {
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    cmplo p0.h, p0/z, z0.h, #2
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x i16> undef, i16 2, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %out = icmp ult <vscale x 8 x i16> %a, %splat
+  %out = icmp ult <vscale x 8 x i16> %a, splat(i16 2)
   ret <vscale x 8 x i1> %out
 }
 
@@ -1211,10 +1027,8 @@ define <vscale x 8 x i1> @int_cmplo_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmplo p0.h, p0/z, z0.h, #3
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x i16> undef, i16 3, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmphi.nxv8i16(<vscale x 8 x i1> %pg,
-                                                                <vscale x 8 x i16> %splat,
+                                                                <vscale x 8 x i16> splat(i16 3),
                                                                 <vscale x 8 x i16> %a)
   ret <vscale x 8 x i1> %out
 }
@@ -1224,11 +1038,9 @@ define <vscale x 8 x i1> @wide_cmplo_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmplo p0.h, p0/z, z0.h, #4
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x i64> undef, i64 4, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmplo.wide.nxv8i16(<vscale x 8 x i1> %pg,
                                                                      <vscale x 8 x i16> %a,
-                                                                     <vscale x 2 x i64> %splat)
+                                                                     <vscale x 2 x i64> splat(i64 4))
   ret <vscale x 8 x i1> %out
 }
 
@@ -1238,9 +1050,7 @@ define <vscale x 4 x i1> @ir_cmplo_s(<vscale x 4 x i32> %a) {
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    cmplo p0.s, p0/z, z0.s, #68
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x i32> undef, i32 68, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %out = icmp ult <vscale x 4 x i32> %a, %splat
+  %out = icmp ult <vscale x 4 x i32> %a, splat(i32 68)
   ret <vscale x 4 x i1> %out
 }
 
@@ -1249,10 +1059,8 @@ define <vscale x 4 x i1> @int_cmplo_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmplo p0.s, p0/z, z0.s, #68
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x i32> undef, i32 68, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmphi.nxv4i32(<vscale x 4 x i1> %pg,
-                                                                <vscale x 4 x i32> %splat,
+                                                                <vscale x 4 x i32> splat(i32 68),
                                                                 <vscale x 4 x i32> %a)
   ret <vscale x 4 x i1> %out
 }
@@ -1262,11 +1070,9 @@ define <vscale x 4 x i1> @wide_cmplo_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmplo p0.s, p0/z, z0.s, #68
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x i64> undef, i64 68, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmplo.wide.nxv4i32(<vscale x 4 x i1> %pg,
                                                                      <vscale x 4 x i32> %a,
-                                                                     <vscale x 2 x i64> %splat)
+                                                                     <vscale x 2 x i64> splat(i64 68))
   ret <vscale x 4 x i1> %out
 }
 
@@ -1276,9 +1082,7 @@ define <vscale x 2 x i1> @ir_cmplo_d(<vscale x 2 x i64> %a) {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    cmplo p0.d, p0/z, z0.d, #127
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x i64> undef, i64 127, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %out = icmp ult <vscale x 2 x i64> %a, %splat
+  %out = icmp ult <vscale x 2 x i64> %a, splat(i64 127)
   ret <vscale x 2 x i1> %out
 }
 
@@ -1287,10 +1091,8 @@ define <vscale x 2 x i1> @int_cmplo_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmplo p0.d, p0/z, z0.d, #127
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x i64> undef, i64 127, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i1> @llvm.aarch64.sve.cmphi.nxv2i64(<vscale x 2 x i1> %pg,
-                                                                <vscale x 2 x i64> %splat,
+                                                                <vscale x 2 x i64> splat(i64 127),
                                                                 <vscale x 2 x i64> %a)
   ret <vscale x 2 x i1> %out
 }
@@ -1305,9 +1107,7 @@ define <vscale x 16 x i1> @ir_cmpls_b(<vscale x 16 x i8> %a) {
 ; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    cmpls p0.b, p0/z, z0.b, #4
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 16 x i8> undef, i8 4, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
-  %out = icmp ule <vscale x 16 x i8> %a, %splat
+  %out = icmp ule <vscale x 16 x i8> %a, splat(i8 4)
   ret <vscale x 16 x i1> %out
 }
 
@@ -1316,10 +1116,8 @@ define <vscale x 16 x i1> @int_cmpls_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmpls p0.b, p0/z, z0.b, #4
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 16 x i8> undef, i8 4, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
   %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmphs.nxv16i8(<vscale x 16 x i1> %pg,
-                                                                 <vscale x 16 x i8> %splat,
+                                                                 <vscale x 16 x i8> splat(i8 4),
                                                                  <vscale x 16 x i8> %a)
   ret <vscale x 16 x i1> %out
 }
@@ -1329,11 +1127,9 @@ define <vscale x 16 x i1> @wide_cmpls_b(<vscale x 16 x i1> %pg, <vscale x 16 x i
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmpls p0.b, p0/z, z0.b, #4
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x i64> undef, i64 4, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpls.wide.nxv16i8(<vscale x 16 x i1> %pg,
                                                                       <vscale x 16 x i8> %a,
-                                                                      <vscale x 2 x i64> %splat)
+                                                                      <vscale x 2 x i64> splat(i64 4))
   ret <vscale x 16 x i1> %out
 }
 
@@ -1343,9 +1139,7 @@ define <vscale x 8 x i1> @ir_cmpls_h(<vscale x 8 x i16> %a) {
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    cmpls p0.h, p0/z, z0.h, #0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x i16> undef, i16 0, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %out = icmp ule <vscale x 8 x i16> %a, %splat
+  %out = icmp ule <vscale x 8 x i16> %a, zeroinitializer
   ret <vscale x 8 x i1> %out
 }
 
@@ -1354,10 +1148,8 @@ define <vscale x 8 x i1> @int_cmpls_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmpls p0.h, p0/z, z0.h, #0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 8 x i16> undef, i16 0, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmphs.nxv8i16(<vscale x 8 x i1> %pg,
-                                                                <vscale x 8 x i16> %splat,
+                                                                <vscale x 8 x i16> zeroinitializer,
                                                                 <vscale x 8 x i16> %a)
   ret <vscale x 8 x i1> %out
 }
@@ -1367,11 +1159,9 @@ define <vscale x 8 x i1> @wide_cmpls_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmpls p0.h, p0/z, z0.h, #0
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x i64> undef, i64 0, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpls.wide.nxv8i16(<vscale x 8 x i1> %pg,
                                                                      <vscale x 8 x i16> %a,
-                                                                     <vscale x 2 x i64> %splat)
+                                                                     <vscale x 2 x i64> zeroinitializer)
   ret <vscale x 8 x i1> %out
 }
 
@@ -1381,9 +1171,7 @@ define <vscale x 4 x i1> @ir_cmpls_s(<vscale x 4 x i32> %a) {
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    cmpls p0.s, p0/z, z0.s, #68
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x i32> undef, i32 68, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %out = icmp ule <vscale x 4 x i32> %a, %splat
+  %out = icmp ule <vscale x 4 x i32> %a, splat(i32 68)
   ret <vscale x 4 x i1> %out
 }
 
@@ -1392,10 +1180,8 @@ define <vscale x 4 x i1> @int_cmpls_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmpls p0.s, p0/z, z0.s, #68
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 4 x i32> undef, i32 68, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmphs.nxv4i32(<vscale x 4 x i1> %pg,
-                                                                <vscale x 4 x i32> %splat,
+                                                                <vscale x 4 x i32> splat(i32 68),
                                                                 <vscale x 4 x i32> %a)
   ret <vscale x 4 x i1> %out
 }
@@ -1405,11 +1191,9 @@ define <vscale x 4 x i1> @wide_cmpls_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmpls p0.s, p0/z, z0.s, #68
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x i64> undef, i64 68, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpls.wide.nxv4i32(<vscale x 4 x i1> %pg,
                                                                      <vscale x 4 x i32> %a,
-                                                                     <vscale x 2 x i64> %splat)
+                                                                     <vscale x 2 x i64> splat(i64 68))
   ret <vscale x 4 x i1> %out
 }
 
@@ -1419,9 +1203,7 @@ define <vscale x 2 x i1> @ir_cmpls_d(<vscale x 2 x i64> %a) {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    cmpls p0.d, p0/z, z0.d, #127
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x i64> undef, i64 127, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %out = icmp ule <vscale x 2 x i64> %a, %splat
+  %out = icmp ule <vscale x 2 x i64> %a, splat(i64 127)
   ret <vscale x 2 x i1> %out
 }
 
@@ -1430,10 +1212,8 @@ define <vscale x 2 x i1> @int_cmpls_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmpls p0.d, p0/z, z0.d, #127
 ; CHECK-NEXT:    ret
-  %elt   = insertelement <vscale x 2 x i64> undef, i64 127, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i1> @llvm.aarch64.sve.cmphs.nxv2i64(<vscale x 2 x i1> %pg,
-                                                                <vscale x 2 x i64> %splat,
+                                                                <vscale x 2 x i64> splat(i64 127),
                                                                 <vscale x 2 x i64> %a)
   ret <vscale x 2 x i1> %out
 }

diff  --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-logical-undef.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-logical-undef.ll
index 07413c5a1617d8..d3f4f89b60af3e 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-logical-undef.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-logical-undef.ll
@@ -60,11 +60,9 @@ define <vscale x 16 x i8> @and_imm_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    and z0.b, z0.b, #0x3
 ; CHECK-NEXT:    ret
-  %imm = insertelement <vscale x 16 x i8> undef, i8 3, i32 0
-  %imm.splat = shufflevector <vscale x 16 x i8> %imm, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
   %out = call <vscale x 16 x i8> @llvm.aarch64.sve.and.u.nxv16i8(<vscale x 16 x i1> %pg,
                                                                  <vscale x 16 x i8> %a,
-                                                                 <vscale x 16 x i8> %imm.splat)
+                                                                 <vscale x 16 x i8> splat(i8 3))
   ret <vscale x 16 x i8> %out
 }
 
@@ -73,11 +71,9 @@ define <vscale x 8 x i16> @and_imm_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    and z0.h, z0.h, #0x4
 ; CHECK-NEXT:    ret
-  %imm = insertelement <vscale x 8 x i16> undef, i16 4, i32 0
-  %imm.splat = shufflevector <vscale x 8 x i16> %imm, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i16> @llvm.aarch64.sve.and.u.nxv8i16(<vscale x 8 x i1> %pg,
                                                                  <vscale x 8 x i16> %a,
-                                                                 <vscale x 8 x i16> %imm.splat)
+                                                                 <vscale x 8 x i16> splat(i16 4))
   ret <vscale x 8 x i16> %out
 }
 
@@ -86,11 +82,9 @@ define <vscale x 4 x i32> @and_imm_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    and z0.s, z0.s, #0x10
 ; CHECK-NEXT:    ret
-  %imm = insertelement <vscale x 4 x i32> undef, i32 16, i32 0
-  %imm.splat = shufflevector <vscale x 4 x i32> %imm, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i32> @llvm.aarch64.sve.and.u.nxv4i32(<vscale x 4 x i1> %pg,
                                                                  <vscale x 4 x i32> %a,
-                                                                 <vscale x 4 x i32> %imm.splat)
+                                                                 <vscale x 4 x i32> splat(i32 16))
   ret <vscale x 4 x i32> %out
 }
 
@@ -99,11 +93,9 @@ define <vscale x 2 x i64> @and_imm_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    and z0.d, z0.d, #0x20
 ; CHECK-NEXT:    ret
-  %imm = insertelement <vscale x 2 x i64> undef, i64 32, i32 0
-  %imm.splat = shufflevector <vscale x 2 x i64> %imm, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i64> @llvm.aarch64.sve.and.u.nxv2i64(<vscale x 2 x i1> %pg,
                                                                  <vscale x 2 x i64> %a,
-                                                                 <vscale x 2 x i64> %imm.splat)
+                                                                 <vscale x 2 x i64> splat(i64 32))
   ret <vscale x 2 x i64> %out
 }
 
@@ -164,11 +156,9 @@ define <vscale x 16 x i8> @eor_imm_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    eor z0.b, z0.b, #0x7
 ; CHECK-NEXT:    ret
-  %imm = insertelement <vscale x 16 x i8> undef, i8 7, i32 0
-  %imm.splat = shufflevector <vscale x 16 x i8> %imm, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
   %out = call <vscale x 16 x i8> @llvm.aarch64.sve.eor.u.nxv16i8(<vscale x 16 x i1> %pg,
                                                                  <vscale x 16 x i8> %a,
-                                                                 <vscale x 16 x i8> %imm.splat)
+                                                                 <vscale x 16 x i8> splat(i8 7))
   ret <vscale x 16 x i8> %out
 }
 
@@ -177,11 +167,9 @@ define <vscale x 8 x i16> @eor_imm_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    eor z0.h, z0.h, #0x8
 ; CHECK-NEXT:    ret
-  %imm = insertelement <vscale x 8 x i16> undef, i16 8, i32 0
-  %imm.splat = shufflevector <vscale x 8 x i16> %imm, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i16> @llvm.aarch64.sve.eor.u.nxv8i16(<vscale x 8 x i1> %pg,
                                                                  <vscale x 8 x i16> %a,
-                                                                 <vscale x 8 x i16> %imm.splat)
+                                                                 <vscale x 8 x i16> splat(i16 8))
   ret <vscale x 8 x i16> %out
 }
 
@@ -190,11 +178,9 @@ define <vscale x 4 x i32> @eor_imm_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    eor z0.s, z0.s, #0x10
 ; CHECK-NEXT:    ret
-  %imm = insertelement <vscale x 4 x i32> undef, i32 16, i32 0
-  %imm.splat = shufflevector <vscale x 4 x i32> %imm, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i32> @llvm.aarch64.sve.eor.u.nxv4i32(<vscale x 4 x i1> %pg,
                                                                  <vscale x 4 x i32> %a,
-                                                                 <vscale x 4 x i32> %imm.splat)
+                                                                 <vscale x 4 x i32> splat(i32 16))
   ret <vscale x 4 x i32> %out
 }
 
@@ -203,11 +189,9 @@ define <vscale x 2 x i64> @eor_imm_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    eor z0.d, z0.d, #0x20
 ; CHECK-NEXT:    ret
-  %imm = insertelement <vscale x 2 x i64> undef, i64 32, i32 0
-  %imm.splat = shufflevector <vscale x 2 x i64> %imm, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i64> @llvm.aarch64.sve.eor.u.nxv2i64(<vscale x 2 x i1> %pg,
                                                                  <vscale x 2 x i64> %a,
-                                                                 <vscale x 2 x i64> %imm.splat)
+                                                                 <vscale x 2 x i64> splat(i64 32))
   ret <vscale x 2 x i64> %out
 }
 
@@ -268,11 +252,9 @@ define <vscale x 16 x i8> @orr_imm_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    orr z0.b, z0.b, #0x8
 ; CHECK-NEXT:    ret
-  %imm = insertelement <vscale x 16 x i8> undef, i8 8, i32 0
-  %imm.splat = shufflevector <vscale x 16 x i8> %imm, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
   %out = call <vscale x 16 x i8> @llvm.aarch64.sve.orr.u.nxv16i8(<vscale x 16 x i1> %pg,
                                                                  <vscale x 16 x i8> %a,
-                                                                 <vscale x 16 x i8> %imm.splat)
+                                                                 <vscale x 16 x i8> splat(i8 8))
   ret <vscale x 16 x i8> %out
 }
 
@@ -281,11 +263,9 @@ define <vscale x 8 x i16> @orr_imm_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    orr z0.h, z0.h, #0xc
 ; CHECK-NEXT:    ret
-  %imm = insertelement <vscale x 8 x i16> undef, i16 12, i32 0
-  %imm.splat = shufflevector <vscale x 8 x i16> %imm, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i16> @llvm.aarch64.sve.orr.u.nxv8i16(<vscale x 8 x i1> %pg,
                                                                  <vscale x 8 x i16> %a,
-                                                                 <vscale x 8 x i16> %imm.splat)
+                                                                 <vscale x 8 x i16> splat(i16 12))
   ret <vscale x 8 x i16> %out
 }
 
@@ -294,11 +274,9 @@ define <vscale x 4 x i32> @orr_imm_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    orr z0.s, z0.s, #0x10
 ; CHECK-NEXT:    ret
-  %imm = insertelement <vscale x 4 x i32> undef, i32 16, i32 0
-  %imm.splat = shufflevector <vscale x 4 x i32> %imm, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i32> @llvm.aarch64.sve.orr.u.nxv4i32(<vscale x 4 x i1> %pg,
                                                                  <vscale x 4 x i32> %a,
-                                                                 <vscale x 4 x i32> %imm.splat)
+                                                                 <vscale x 4 x i32> splat(i32 16))
   ret <vscale x 4 x i32> %out
 }
 
@@ -307,11 +285,9 @@ define <vscale x 2 x i64> @orr_imm_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    orr z0.d, z0.d, #0x20
 ; CHECK-NEXT:    ret
-  %imm = insertelement <vscale x 2 x i64> undef, i64 32, i32 0
-  %imm.splat = shufflevector <vscale x 2 x i64> %imm, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i64> @llvm.aarch64.sve.orr.u.nxv2i64(<vscale x 2 x i1> %pg,
                                                                  <vscale x 2 x i64> %a,
-                                                                 <vscale x 2 x i64> %imm.splat)
+                                                                 <vscale x 2 x i64> splat(i64 32))
   ret <vscale x 2 x i64> %out
 }
 
@@ -372,11 +348,9 @@ define <vscale x 16 x i8> @bic_imm_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    and z0.b, z0.b, #0xf8
 ; CHECK-NEXT:    ret
-  %imm = insertelement <vscale x 16 x i8> undef, i8 7, i32 0
-  %imm.splat = shufflevector <vscale x 16 x i8> %imm, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
   %out = call <vscale x 16 x i8> @llvm.aarch64.sve.bic.u.nxv16i8(<vscale x 16 x i1> %pg,
                                                                  <vscale x 16 x i8> %a,
-                                                                 <vscale x 16 x i8> %imm.splat)
+                                                                 <vscale x 16 x i8> splat(i8 7))
   ret <vscale x 16 x i8> %out
 }
 
@@ -385,11 +359,9 @@ define <vscale x 8 x i16> @bic_imm_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    and z0.h, z0.h, #0xfff7
 ; CHECK-NEXT:    ret
-  %imm = insertelement <vscale x 8 x i16> undef, i16 8, i32 0
-  %imm.splat = shufflevector <vscale x 8 x i16> %imm, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i16> @llvm.aarch64.sve.bic.u.nxv8i16(<vscale x 8 x i1> %pg,
                                                                  <vscale x 8 x i16> %a,
-                                                                 <vscale x 8 x i16> %imm.splat)
+                                                                 <vscale x 8 x i16> splat(i16 8))
   ret <vscale x 8 x i16> %out
 }
 
@@ -398,11 +370,9 @@ define <vscale x 4 x i32> @bic_imm_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    and z0.s, z0.s, #0xffffffef
 ; CHECK-NEXT:    ret
-  %imm = insertelement <vscale x 4 x i32> undef, i32 16, i32 0
-  %imm.splat = shufflevector <vscale x 4 x i32> %imm, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i32> @llvm.aarch64.sve.bic.u.nxv4i32(<vscale x 4 x i1> %pg,
                                                                  <vscale x 4 x i32> %a,
-                                                                 <vscale x 4 x i32> %imm.splat)
+                                                                 <vscale x 4 x i32> splat(i32 16))
   ret <vscale x 4 x i32> %out
 }
 
@@ -411,11 +381,9 @@ define <vscale x 2 x i64> @bic_imm_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    and z0.d, z0.d, #0xffffffffffffffdf
 ; CHECK-NEXT:    ret
-  %imm = insertelement <vscale x 2 x i64> undef, i64 32, i32 0
-  %imm.splat = shufflevector <vscale x 2 x i64> %imm, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i64> @llvm.aarch64.sve.bic.u.nxv2i64(<vscale x 2 x i1> %pg,
                                                                  <vscale x 2 x i64> %a,
-                                                                 <vscale x 2 x i64> %imm.splat)
+                                                                 <vscale x 2 x i64> splat(i64 32))
   ret <vscale x 2 x i64> %out
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-reinterpret-no-streaming.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-reinterpret-no-streaming.ll
index bc5cdb48fef61c..3986a5a79d57da 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-reinterpret-no-streaming.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-reinterpret-no-streaming.ll
@@ -9,9 +9,7 @@ define <vscale x 16 x i1> @reinterpret_bool_from_splat() {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ret
-  %ins = insertelement <vscale x 2 x i1> undef, i1 1, i32 0
-  %splat = shufflevector <vscale x 2 x i1> %ins, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
-  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %splat)
+  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> splat(i1 true))
   ret <vscale x 16 x i1> %out
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-shifts-undef.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-shifts-undef.ll
index 70c2e5298c897f..370eea26450f3b 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-shifts-undef.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-shifts-undef.ll
@@ -60,11 +60,9 @@ define <vscale x 16 x i8> @asr_imm_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    asr z0.b, z0.b, #3
 ; CHECK-NEXT:    ret
-  %imm = insertelement <vscale x 16 x i8> undef, i8 3, i32 0
-  %imm.splat = shufflevector <vscale x 16 x i8> %imm, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
   %out = call <vscale x 16 x i8> @llvm.aarch64.sve.asr.u.nxv16i8(<vscale x 16 x i1> %pg,
                                                                  <vscale x 16 x i8> %a,
-                                                                 <vscale x 16 x i8> %imm.splat)
+                                                                 <vscale x 16 x i8> splat(i8 3))
   ret <vscale x 16 x i8> %out
 }
 
@@ -73,11 +71,9 @@ define <vscale x 8 x i16> @asr_imm_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    asr z0.h, z0.h, #4
 ; CHECK-NEXT:    ret
-  %imm = insertelement <vscale x 8 x i16> undef, i16 4, i32 0
-  %imm.splat = shufflevector <vscale x 8 x i16> %imm, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i16> @llvm.aarch64.sve.asr.u.nxv8i16(<vscale x 8 x i1> %pg,
                                                                  <vscale x 8 x i16> %a,
-                                                                 <vscale x 8 x i16> %imm.splat)
+                                                                 <vscale x 8 x i16> splat(i16 4))
   ret <vscale x 8 x i16> %out
 }
 
@@ -86,11 +82,9 @@ define <vscale x 4 x i32> @asr_imm_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    asr z0.s, z0.s, #5
 ; CHECK-NEXT:    ret
-  %imm = insertelement <vscale x 4 x i32> undef, i32 5, i32 0
-  %imm.splat = shufflevector <vscale x 4 x i32> %imm, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i32> @llvm.aarch64.sve.asr.u.nxv4i32(<vscale x 4 x i1> %pg,
                                                                  <vscale x 4 x i32> %a,
-                                                                 <vscale x 4 x i32> %imm.splat)
+                                                                 <vscale x 4 x i32> splat(i32 5))
   ret <vscale x 4 x i32> %out
 }
 
@@ -99,11 +93,9 @@ define <vscale x 2 x i64> @asr_imm_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    asr z0.d, z0.d, #6
 ; CHECK-NEXT:    ret
-  %imm = insertelement <vscale x 2 x i64> undef, i64 6, i32 0
-  %imm.splat = shufflevector <vscale x 2 x i64> %imm, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i64> @llvm.aarch64.sve.asr.u.nxv2i64(<vscale x 2 x i1> %pg,
                                                                  <vscale x 2 x i64> %a,
-                                                                 <vscale x 2 x i64> %imm.splat)
+                                                                 <vscale x 2 x i64> splat(i64 6))
   ret <vscale x 2 x i64> %out
 }
 
@@ -164,11 +156,9 @@ define <vscale x 16 x i8> @lsl_imm_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsl z0.b, z0.b, #7
 ; CHECK-NEXT:    ret
-  %imm = insertelement <vscale x 16 x i8> undef, i8 7, i32 0
-  %imm.splat = shufflevector <vscale x 16 x i8> %imm, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
   %out = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.u.nxv16i8(<vscale x 16 x i1> %pg,
                                                                  <vscale x 16 x i8> %a,
-                                                                 <vscale x 16 x i8> %imm.splat)
+                                                                 <vscale x 16 x i8> splat(i8 7))
   ret <vscale x 16 x i8> %out
 }
 
@@ -177,11 +167,9 @@ define <vscale x 8 x i16> @lsl_imm_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsl z0.h, z0.h, #8
 ; CHECK-NEXT:    ret
-  %imm = insertelement <vscale x 8 x i16> undef, i16 8, i32 0
-  %imm.splat = shufflevector <vscale x 8 x i16> %imm, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.u.nxv8i16(<vscale x 8 x i1> %pg,
                                                                  <vscale x 8 x i16> %a,
-                                                                 <vscale x 8 x i16> %imm.splat)
+                                                                 <vscale x 8 x i16> splat(i16 8))
   ret <vscale x 8 x i16> %out
 }
 
@@ -190,11 +178,9 @@ define <vscale x 4 x i32> @lsl_imm_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsl z0.s, z0.s, #9
 ; CHECK-NEXT:    ret
-  %imm = insertelement <vscale x 4 x i32> undef, i32 9, i32 0
-  %imm.splat = shufflevector <vscale x 4 x i32> %imm, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.u.nxv4i32(<vscale x 4 x i1> %pg,
                                                                  <vscale x 4 x i32> %a,
-                                                                 <vscale x 4 x i32> %imm.splat)
+                                                                 <vscale x 4 x i32> splat(i32 9))
   ret <vscale x 4 x i32> %out
 }
 
@@ -203,11 +189,9 @@ define <vscale x 2 x i64> @lsl_imm_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsl z0.d, z0.d, #10
 ; CHECK-NEXT:    ret
-  %imm = insertelement <vscale x 2 x i64> undef, i64 10, i32 0
-  %imm.splat = shufflevector <vscale x 2 x i64> %imm, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i64> @llvm.aarch64.sve.lsl.u.nxv2i64(<vscale x 2 x i1> %pg,
                                                                  <vscale x 2 x i64> %a,
-                                                                 <vscale x 2 x i64> %imm.splat)
+                                                                 <vscale x 2 x i64> splat(i64 10))
   ret <vscale x 2 x i64> %out
 }
 
@@ -268,11 +252,9 @@ define <vscale x 16 x i8> @lsr_imm_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsr z0.b, z0.b, #8
 ; CHECK-NEXT:    ret
-  %imm = insertelement <vscale x 16 x i8> undef, i8 8, i32 0
-  %imm.splat = shufflevector <vscale x 16 x i8> %imm, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
   %out = call <vscale x 16 x i8> @llvm.aarch64.sve.lsr.u.nxv16i8(<vscale x 16 x i1> %pg,
                                                                  <vscale x 16 x i8> %a,
-                                                                 <vscale x 16 x i8> %imm.splat)
+                                                                 <vscale x 16 x i8> splat(i8 8))
   ret <vscale x 16 x i8> %out
 }
 
@@ -281,11 +263,9 @@ define <vscale x 8 x i16> @lsr_imm_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsr z0.h, z0.h, #12
 ; CHECK-NEXT:    ret
-  %imm = insertelement <vscale x 8 x i16> undef, i16 12, i32 0
-  %imm.splat = shufflevector <vscale x 8 x i16> %imm, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i16> @llvm.aarch64.sve.lsr.u.nxv8i16(<vscale x 8 x i1> %pg,
                                                                  <vscale x 8 x i16> %a,
-                                                                 <vscale x 8 x i16> %imm.splat)
+                                                                 <vscale x 8 x i16> splat(i16 12))
   ret <vscale x 8 x i16> %out
 }
 
@@ -294,11 +274,9 @@ define <vscale x 4 x i32> @lsr_imm_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsr z0.s, z0.s, #13
 ; CHECK-NEXT:    ret
-  %imm = insertelement <vscale x 4 x i32> undef, i32 13, i32 0
-  %imm.splat = shufflevector <vscale x 4 x i32> %imm, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i32> @llvm.aarch64.sve.lsr.u.nxv4i32(<vscale x 4 x i1> %pg,
                                                                  <vscale x 4 x i32> %a,
-                                                                 <vscale x 4 x i32> %imm.splat)
+                                                                 <vscale x 4 x i32> splat(i32 13))
   ret <vscale x 4 x i32> %out
 }
 
@@ -307,11 +285,9 @@ define <vscale x 2 x i64> @lsr_imm_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64>
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsr z0.d, z0.d, #14
 ; CHECK-NEXT:    ret
-  %imm = insertelement <vscale x 2 x i64> undef, i64 14, i32 0
-  %imm.splat = shufflevector <vscale x 2 x i64> %imm, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i64> @llvm.aarch64.sve.lsr.u.nxv2i64(<vscale x 2 x i1> %pg,
                                                                  <vscale x 2 x i64> %a,
-                                                                 <vscale x 2 x i64> %imm.splat)
+                                                                 <vscale x 2 x i64> splat(i64 14))
   ret <vscale x 2 x i64> %out
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/sve-knownbits.ll b/llvm/test/CodeGen/AArch64/sve-knownbits.ll
index ac391948d6bd4d..c22d18c7e2edea 100644
--- a/llvm/test/CodeGen/AArch64/sve-knownbits.ll
+++ b/llvm/test/CodeGen/AArch64/sve-knownbits.ll
@@ -18,9 +18,7 @@ define <vscale x 4 x i32> @asrlsr(<vscale x 4 x i64> %va) {
 ; CHECK-NEXT:    lsr z0.d, z0.d, #15
 ; CHECK-NEXT:    uzp1 z0.s, z0.s, z1.s
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i32> poison, i32 15, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %vb = zext <vscale x 4 x i32> %splat to <vscale x 4 x i64>
+  %vb = zext <vscale x 4 x i32> splat(i32 15) to <vscale x 4 x i64>
   %x = ashr <vscale x 4 x i64> %va, %vb
   %y = trunc <vscale x 4 x i64> %x to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %y

diff  --git a/llvm/test/CodeGen/AArch64/sve-splat-one-and-ptrue.ll b/llvm/test/CodeGen/AArch64/sve-splat-one-and-ptrue.ll
index a517eff05b0785..a409865138e0d7 100644
--- a/llvm/test/CodeGen/AArch64/sve-splat-one-and-ptrue.ll
+++ b/llvm/test/CodeGen/AArch64/sve-splat-one-and-ptrue.ll
@@ -7,58 +7,52 @@ target triple = "aarch64-unknown-linux-gnu"
 
 define <vscale x 16 x i1> @fold_away_ptrue_and_ptrue() #0 {
 ; CHECK-LABEL: fold_away_ptrue_and_ptrue:
-; CHECK:       // %bb.0: // %entry
+; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    ret
-entry:
-  %0 = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
-  %1 = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %0)
-  %2 = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
-  %and = and <vscale x 16 x i1> %2, %1
+  %1 = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
+  %2 = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %1)
+  %3 = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
+  %and = and <vscale x 16 x i1> %3, %2
   ret <vscale x 16 x i1> %and
 }
 
 define <vscale x 16 x i1> @fold_away_ptrue_and_splat_predicate() #0 {
 ; CHECK-LABEL: fold_away_ptrue_and_splat_predicate:
-; CHECK:       // %bb.0: // %entry
+; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    ret
-entry:
-  %ins = insertelement <vscale x 4 x i1> undef, i1 1, i32 0
-  %splat = shufflevector <vscale x 4 x i1> %ins, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
-  %0 = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %splat)
-  %1 = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
-  %and = and <vscale x 16 x i1> %0, %1
+  %1 = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> splat(i1 true))
+  %2 = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
+  %and = and <vscale x 16 x i1> %1, %2
   ret <vscale x 16 x i1> %and
 }
 
 ; Ensure that one AND operation remain for inactive lanes zeroing with 2 x i1 type (llvm.aarch64.sve.convert.to.svbool.nxv2i1).
 define <vscale x 16 x i1> @fold_away_ptrue_and_convert_to() #0 {
 ; CHECK-LABEL: fold_away_ptrue_and_convert_to:
-; CHECK:       // %bb.0: // %entry
+; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    ptrue p1.d
 ; CHECK-NEXT:    and p0.b, p1/z, p1.b, p0.b
 ; CHECK-NEXT:    ret
-entry:
-  %0 = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
-  %1 = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %0)
-  %2 = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
-  %3 = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %2)
-  %4 = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %3)
-  %and = and <vscale x 16 x i1> %4, %1
+  %1 = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
+  %2 = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %1)
+  %3 = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
+  %4 = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %3)
+  %5 = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %4)
+  %and = and <vscale x 16 x i1> %5, %2
   ret <vscale x 16 x i1> %and
 }
 
 define <vscale x 16 x i1> @fold_away_two_similar() #0 {
 ; CHECK-LABEL: fold_away_two_similar:
-; CHECK:       // %bb.0: // %entry
+; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ret
-entry:
-  %0 = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
   %1 = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
-  %and = and <vscale x 16 x i1> %0, %1
+  %2 = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
+  %and = and <vscale x 16 x i1> %1, %2
   ret <vscale x 16 x i1> %and
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/sve-splat-sext.ll b/llvm/test/CodeGen/AArch64/sve-splat-sext.ll
index f689aa6469a255..467af905a4d844 100644
--- a/llvm/test/CodeGen/AArch64/sve-splat-sext.ll
+++ b/llvm/test/CodeGen/AArch64/sve-splat-sext.ll
@@ -6,9 +6,7 @@ define <vscale x 8 x i16> @sext_splat_v8i16_128() {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z0.h, #-128 // =0xffffffffffffff80
 ; CHECK-NEXT:    ret
-  %i = insertelement <vscale x 8 x i16> poison, i16 128, i32 0
-  %s = shufflevector <vscale x 8 x i16> %i, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %a = shl <vscale x 8 x i16> %s, splat (i16 8)
+  %a = shl <vscale x 8 x i16> splat(i16 128), splat (i16 8)
   %b = ashr <vscale x 8 x i16> %a, splat (i16 8)
   ret <vscale x 8 x i16> %b
 }
@@ -20,9 +18,7 @@ define <vscale x 8 x i1> @sext_icmp_splat_v8i16_128(<vscale x 8 x i8> %d) {
 ; CHECK-NEXT:    sxtb z0.h, p0/m, z0.h
 ; CHECK-NEXT:    cmpgt p0.h, p0/z, z0.h, #-1
 ; CHECK-NEXT:    ret
-  %i = insertelement <vscale x 8 x i8> poison, i8 128, i32 0
-  %s = shufflevector <vscale x 8 x i8> %i, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %c = icmp ugt <vscale x 8 x i8> %s, %d
+  %c = icmp ugt <vscale x 8 x i8> splat(i8 128), %d
   ret <vscale x 8 x i1> %c
 }
 
@@ -33,8 +29,6 @@ define <vscale x 4 x i1> @sext_icmp_splat_v4i16_128(<vscale x 4 x i8> %d) {
 ; CHECK-NEXT:    sxtb z0.s, p0/m, z0.s
 ; CHECK-NEXT:    cmpgt p0.s, p0/z, z0.s, #-1
 ; CHECK-NEXT:    ret
-  %i = insertelement <vscale x 4 x i8> poison, i8 128, i32 0
-  %s = shufflevector <vscale x 4 x i8> %i, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %c = icmp ugt <vscale x 4 x i8> %s, %d
+  %c = icmp ugt <vscale x 4 x i8> splat(i8 128), %d
   ret <vscale x 4 x i1> %c
 }

diff  --git a/llvm/test/CodeGen/AArch64/sve-st1-addressing-mode-reg-imm.ll b/llvm/test/CodeGen/AArch64/sve-st1-addressing-mode-reg-imm.ll
index 3273e6b384f637..a0bfc7034a3864 100644
--- a/llvm/test/CodeGen/AArch64/sve-st1-addressing-mode-reg-imm.ll
+++ b/llvm/test/CodeGen/AArch64/sve-st1-addressing-mode-reg-imm.ll
@@ -109,9 +109,7 @@ define void @store_nxv2f32(ptr %out) {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    st1w { z0.d }, p0, [x0]
 ; CHECK-NEXT:    ret
-  %ins = insertelement <vscale x 2 x float> undef, float 1.0, i32 0
-  %splat = shufflevector <vscale x 2 x float> %ins, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer
-  store <vscale x 2 x float> %splat, ptr %out
+  store <vscale x 2 x float> splat(float 1.0), ptr %out
   ret void
 }
 
@@ -122,9 +120,7 @@ define void @store_nxv4f16(ptr %out) {
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    st1h { z0.s }, p0, [x0]
 ; CHECK-NEXT:    ret
-  %ins = insertelement <vscale x 4 x half> undef, half 1.0, i32 0
-  %splat = shufflevector <vscale x 4 x half> %ins, <vscale x 4 x half> undef, <vscale x 4 x i32> zeroinitializer
-  store <vscale x 4 x half> %splat, ptr %out
+  store <vscale x 4 x half> splat(half 1.0), ptr %out
   ret void
 }
 
@@ -139,9 +135,7 @@ define void @store_nxv6f32(ptr %out) {
 ; CHECK-NEXT:    st1w { z0.d }, p0, [x0, #2, mul vl]
 ; CHECK-NEXT:    st1w { z0.s }, p1, [x0]
 ; CHECK-NEXT:    ret
-  %ins = insertelement <vscale x 6 x float> undef, float 1.0, i32 0
-  %splat = shufflevector <vscale x 6 x float> %ins, <vscale x 6 x float> undef, <vscale x 6 x i32> zeroinitializer
-  store <vscale x 6 x float> %splat, ptr %out
+  store <vscale x 6 x float> splat(float 1.0), ptr %out
   ret void
 }
 
@@ -154,8 +148,6 @@ define void @store_nxv12f16(ptr %out) {
 ; CHECK-NEXT:    st1h { z0.s }, p0, [x0, #2, mul vl]
 ; CHECK-NEXT:    st1h { z0.h }, p1, [x0]
 ; CHECK-NEXT:    ret
-  %ins = insertelement <vscale x 12 x half> undef, half 1.0, i32 0
-  %splat = shufflevector <vscale x 12 x half> %ins, <vscale x 12 x half> undef, <vscale x 12 x i32> zeroinitializer
-  store <vscale x 12 x half> %splat, ptr %out
+  store <vscale x 12 x half> splat(half 1.0), ptr %out
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/sve-stepvector.ll b/llvm/test/CodeGen/AArch64/sve-stepvector.ll
index f79ec00a19e6f4..91c4659997789e 100644
--- a/llvm/test/CodeGen/AArch64/sve-stepvector.ll
+++ b/llvm/test/CodeGen/AArch64/sve-stepvector.ll
@@ -5,75 +5,69 @@
 
 define <vscale x 2 x i64> @stepvector_nxv2i64() {
 ; CHECK-LABEL: stepvector_nxv2i64:
-; CHECK:       // %bb.0: // %entry
+; CHECK:       // %bb.0:
 ; CHECK-NEXT:    index z0.d, #0, #1
 ; CHECK-NEXT:    ret
-entry:
-  %0 = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
-  ret <vscale x 2 x i64> %0
+  %1 = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
+  ret <vscale x 2 x i64> %1
 }
 
 define <vscale x 4 x i32> @stepvector_nxv4i32() {
 ; CHECK-LABEL: stepvector_nxv4i32:
-; CHECK:       // %bb.0: // %entry
+; CHECK:       // %bb.0:
 ; CHECK-NEXT:    index z0.s, #0, #1
 ; CHECK-NEXT:    ret
-entry:
-  %0 = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
-  ret <vscale x 4 x i32> %0
+  %1 = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
+  ret <vscale x 4 x i32> %1
 }
 
 define <vscale x 8 x i16> @stepvector_nxv8i16() {
 ; CHECK-LABEL: stepvector_nxv8i16:
-; CHECK:       // %bb.0: // %entry
+; CHECK:       // %bb.0:
 ; CHECK-NEXT:    index z0.h, #0, #1
 ; CHECK-NEXT:    ret
-entry:
-  %0 = call <vscale x 8 x i16> @llvm.stepvector.nxv8i16()
-  ret <vscale x 8 x i16> %0
+  %1 = call <vscale x 8 x i16> @llvm.stepvector.nxv8i16()
+  ret <vscale x 8 x i16> %1
 }
 
 define <vscale x 16 x i8> @stepvector_nxv16i8() {
 ; CHECK-LABEL: stepvector_nxv16i8:
-; CHECK:       // %bb.0: // %entry
+; CHECK:       // %bb.0:
 ; CHECK-NEXT:    index z0.b, #0, #1
 ; CHECK-NEXT:    ret
-entry:
-  %0 = call <vscale x 16 x i8> @llvm.stepvector.nxv16i8()
-  ret <vscale x 16 x i8> %0
+  %1 = call <vscale x 16 x i8> @llvm.stepvector.nxv16i8()
+  ret <vscale x 16 x i8> %1
 }
 
 ; ILLEGAL INTEGER TYPES
 
 define <vscale x 6 x i64> @stepvector_nxv6i64() {
 ; CHECK-LABEL: stepvector_nxv6i64:
-; CHECK:       // %bb.0: // %entry
+; CHECK:       // %bb.0:
 ; CHECK-NEXT:    index z0.d, #0, #1
 ; CHECK-NEXT:    mov z2.d, z0.d
 ; CHECK-NEXT:    mov z1.d, z0.d
 ; CHECK-NEXT:    incd z1.d
 ; CHECK-NEXT:    incd z2.d, all, mul #2
 ; CHECK-NEXT:    ret
-entry:
-  %0 = call <vscale x 6 x i64> @llvm.stepvector.nxv6i64()
-  ret <vscale x 6 x i64> %0
+  %1 = call <vscale x 6 x i64> @llvm.stepvector.nxv6i64()
+  ret <vscale x 6 x i64> %1
 }
 
 define <vscale x 4 x i64> @stepvector_nxv4i64() {
 ; CHECK-LABEL: stepvector_nxv4i64:
-; CHECK:       // %bb.0: // %entry
+; CHECK:       // %bb.0:
 ; CHECK-NEXT:    index z0.d, #0, #1
 ; CHECK-NEXT:    mov z1.d, z0.d
 ; CHECK-NEXT:    incd z1.d
 ; CHECK-NEXT:    ret
-entry:
-  %0 = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
-  ret <vscale x 4 x i64> %0
+  %1 = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
+  ret <vscale x 4 x i64> %1
 }
 
 define <vscale x 16 x i32> @stepvector_nxv16i32() {
 ; CHECK-LABEL: stepvector_nxv16i32:
-; CHECK:       // %bb.0: // %entry
+; CHECK:       // %bb.0:
 ; CHECK-NEXT:    index z0.s, #0, #1
 ; CHECK-NEXT:    mov z1.d, z0.d
 ; CHECK-NEXT:    mov z2.d, z0.d
@@ -82,231 +76,201 @@ define <vscale x 16 x i32> @stepvector_nxv16i32() {
 ; CHECK-NEXT:    mov z3.d, z1.d
 ; CHECK-NEXT:    incw z3.s, all, mul #2
 ; CHECK-NEXT:    ret
-entry:
-  %0 = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
-  ret <vscale x 16 x i32> %0
+  %1 = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
+  ret <vscale x 16 x i32> %1
 }
 
 define <vscale x 3 x i32> @stepvector_nxv3i32() {
 ; CHECK-LABEL: stepvector_nxv3i32:
-; CHECK:       // %bb.0: // %entry
+; CHECK:       // %bb.0:
 ; CHECK-NEXT:    index z0.s, #0, #1
 ; CHECK-NEXT:    ret
-entry:
-  %0 = call <vscale x 3 x i32> @llvm.stepvector.nxv3i32()
-  ret <vscale x 3 x i32> %0
+  %1 = call <vscale x 3 x i32> @llvm.stepvector.nxv3i32()
+  ret <vscale x 3 x i32> %1
 }
 
 define <vscale x 2 x i32> @stepvector_nxv2i32() {
 ; CHECK-LABEL: stepvector_nxv2i32:
-; CHECK:       // %bb.0: // %entry
+; CHECK:       // %bb.0:
 ; CHECK-NEXT:    index z0.d, #0, #1
 ; CHECK-NEXT:    ret
-entry:
-  %0 = call <vscale x 2 x i32> @llvm.stepvector.nxv2i32()
-  ret <vscale x 2 x i32> %0
+  %1 = call <vscale x 2 x i32> @llvm.stepvector.nxv2i32()
+  ret <vscale x 2 x i32> %1
 }
 
 define <vscale x 4 x i16> @stepvector_nxv4i16() {
 ; CHECK-LABEL: stepvector_nxv4i16:
-; CHECK:       // %bb.0: // %entry
+; CHECK:       // %bb.0:
 ; CHECK-NEXT:    index z0.s, #0, #1
 ; CHECK-NEXT:    ret
-entry:
-  %0 = call <vscale x 4 x i16> @llvm.stepvector.nxv4i16()
-  ret <vscale x 4 x i16> %0
+  %1 = call <vscale x 4 x i16> @llvm.stepvector.nxv4i16()
+  ret <vscale x 4 x i16> %1
 }
 
 define <vscale x 8 x i8> @stepvector_nxv8i8() {
 ; CHECK-LABEL: stepvector_nxv8i8:
-; CHECK:       // %bb.0: // %entry
+; CHECK:       // %bb.0:
 ; CHECK-NEXT:    index z0.h, #0, #1
 ; CHECK-NEXT:    ret
-entry:
-  %0 = call <vscale x 8 x i8> @llvm.stepvector.nxv8i8()
-  ret <vscale x 8 x i8> %0
+  %1 = call <vscale x 8 x i8> @llvm.stepvector.nxv8i8()
+  ret <vscale x 8 x i8> %1
 }
 
 define <vscale x 8 x i8> @add_stepvector_nxv8i8() {
 ; CHECK-LABEL: add_stepvector_nxv8i8:
-; CHECK:       // %bb.0: // %entry
+; CHECK:       // %bb.0:
 ; CHECK-NEXT:    index z0.h, #0, #2
 ; CHECK-NEXT:    ret
-entry:
-  %0 = call <vscale x 8 x i8> @llvm.stepvector.nxv8i8()
   %1 = call <vscale x 8 x i8> @llvm.stepvector.nxv8i8()
-  %2 = add <vscale x 8 x i8> %0, %1
-  ret <vscale x 8 x i8> %2
+  %2 = call <vscale x 8 x i8> @llvm.stepvector.nxv8i8()
+  %3 = add <vscale x 8 x i8> %1, %2
+  ret <vscale x 8 x i8> %3
 }
 
 define <vscale x 8 x i8> @add_stepvector_nxv8i8_1(<vscale x 8 x i8> %p) {
 ; CHECK-LABEL: add_stepvector_nxv8i8_1:
-; CHECK:       // %bb.0: // %entry
+; CHECK:       // %bb.0:
 ; CHECK-NEXT:    index z1.h, #0, #2
 ; CHECK-NEXT:    add z0.h, z0.h, z1.h
 ; CHECK-NEXT:    ret
-entry:
-  %0 = call <vscale x 8 x i8> @llvm.stepvector.nxv8i8()
-  %1 = add <vscale x 8 x i8> %p, %0
-  %2 = call <vscale x 8 x i8> @llvm.stepvector.nxv8i8()
-  %3 = add <vscale x 8 x i8> %1, %2
-  ret <vscale x 8 x i8> %3
+  %1 = call <vscale x 8 x i8> @llvm.stepvector.nxv8i8()
+  %2 = add <vscale x 8 x i8> %p, %1
+  %3 = call <vscale x 8 x i8> @llvm.stepvector.nxv8i8()
+  %4 = add <vscale x 8 x i8> %2, %3
+  ret <vscale x 8 x i8> %4
 }
 
 define <vscale x 8 x i8> @add_stepvector_nxv8i8_2() {
 ; CHECK-LABEL: add_stepvector_nxv8i8_2:
-; CHECK:       // %bb.0: // %entry
+; CHECK:       // %bb.0:
 ; CHECK-NEXT:    index z0.h, #2, #1
 ; CHECK-NEXT:    ret
-entry:
-  %0 = insertelement <vscale x 8 x i8> poison, i8 2, i32 0
-  %1 = shufflevector <vscale x 8 x i8> %0, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
   %2 = call <vscale x 8 x i8> @llvm.stepvector.nxv8i8()
-  %3 = add <vscale x 8 x i8> %2, %1
+  %3 = add <vscale x 8 x i8> %2, splat(i8 2)
   ret <vscale x 8 x i8> %3
 }
 
 define <vscale x 8 x i8> @add_stepvector_nxv8i8_2_commutative() {
 ; CHECK-LABEL: add_stepvector_nxv8i8_2_commutative:
-; CHECK:       // %bb.0: // %entry
+; CHECK:       // %bb.0:
 ; CHECK-NEXT:    index z0.h, #2, #1
 ; CHECK-NEXT:    ret
-entry:
-  %0 = insertelement <vscale x 8 x i8> poison, i8 2, i32 0
-  %1 = shufflevector <vscale x 8 x i8> %0, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
   %2 = call <vscale x 8 x i8> @llvm.stepvector.nxv8i8()
-  %3 = add <vscale x 8 x i8> %1, %2
+  %3 = add <vscale x 8 x i8> splat(i8 2), %2
   ret <vscale x 8 x i8> %3
 }
 
 define <vscale x 8 x i16> @add_stepvector_nxv8i16_1(i16 %data) {
 ; CHECK-LABEL: add_stepvector_nxv8i16_1:
-; CHECK:       // %bb.0: // %entry
+; CHECK:       // %bb.0:
 ; CHECK-NEXT:    index z0.h, w0, #1
 ; CHECK-NEXT:    ret
-entry:
-  %0 = insertelement <vscale x 8 x i16> poison, i16 %data, i32 0
-  %1 = shufflevector <vscale x 8 x i16> %0, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %2 = call <vscale x 8 x i16> @llvm.stepvector.nxv8i16()
-  %3 = add <vscale x 8 x i16> %2, %1
-  ret <vscale x 8 x i16> %3
+  %1 = insertelement <vscale x 8 x i16> poison, i16 %data, i32 0
+  %2 = shufflevector <vscale x 8 x i16> %1, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
+  %3 = call <vscale x 8 x i16> @llvm.stepvector.nxv8i16()
+  %4 = add <vscale x 8 x i16> %3, %2
+  ret <vscale x 8 x i16> %4
 }
 
 define <vscale x 4 x i32> @add_stepvector_nxv4i32_1(i32 %data) {
 ; CHECK-LABEL: add_stepvector_nxv4i32_1:
-; CHECK:       // %bb.0: // %entry
+; CHECK:       // %bb.0:
 ; CHECK-NEXT:    index z0.s, w0, #1
 ; CHECK-NEXT:    ret
-entry:
-  %0 = insertelement <vscale x 4 x i32> poison, i32 %data, i32 0
-  %1 = shufflevector <vscale x 4 x i32> %0, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %2 = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
-  %3 = add <vscale x 4 x i32> %2, %1
-  ret <vscale x 4 x i32> %3
+  %1 = insertelement <vscale x 4 x i32> poison, i32 %data, i32 0
+  %2 = shufflevector <vscale x 4 x i32> %1, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+  %3 = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
+  %4 = add <vscale x 4 x i32> %3, %2
+  ret <vscale x 4 x i32> %4
 }
 
 define <vscale x 4 x i32> @multiple_use_stepvector_nxv4i32_1(i32 %data) {
 ; CHECK-LABEL: multiple_use_stepvector_nxv4i32_1:
-; CHECK:       // %bb.0: // %entry
+; CHECK:       // %bb.0:
 ; CHECK-NEXT:    index z0.s, w0, #1
 ; CHECK-NEXT:    mov z1.s, w0
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    mul z1.s, p0/m, z1.s, z0.s
 ; CHECK-NEXT:    sub z0.s, z1.s, z0.s
 ; CHECK-NEXT:    ret
-entry:
-  %0 = insertelement <vscale x 4 x i32> poison, i32 %data, i32 0
-  %1 = shufflevector <vscale x 4 x i32> %0, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %2 = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
-  %3 = add <vscale x 4 x i32> %2, %1
-  %4 = mul <vscale x 4 x i32> %1, %3
-  %5 = sub <vscale x 4 x i32> %4, %3
-  ret <vscale x 4 x i32> %5
+  %1 = insertelement <vscale x 4 x i32> poison, i32 %data, i32 0
+  %2 = shufflevector <vscale x 4 x i32> %1, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+  %3 = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
+  %4 = add <vscale x 4 x i32> %3, %2
+  %5 = mul <vscale x 4 x i32> %2, %4
+  %6 = sub <vscale x 4 x i32> %5, %4
+  ret <vscale x 4 x i32> %6
 }
 
 define <vscale x 2 x i64> @add_stepvector_nxv2i64_1(i64 %data) {
 ; CHECK-LABEL: add_stepvector_nxv2i64_1:
-; CHECK:       // %bb.0: // %entry
+; CHECK:       // %bb.0:
 ; CHECK-NEXT:    index z0.d, x0, #1
 ; CHECK-NEXT:    ret
-entry:
-  %0 = insertelement <vscale x 2 x i64> poison, i64 %data, i32 0
-  %1 = shufflevector <vscale x 2 x i64> %0, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %2 = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
-  %3 = add <vscale x 2 x i64> %1, %2
-  ret <vscale x 2 x i64> %3
+  %1 = insertelement <vscale x 2 x i64> poison, i64 %data, i32 0
+  %2 = shufflevector <vscale x 2 x i64> %1, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
+  %3 = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
+  %4 = add <vscale x 2 x i64> %2, %3
+  ret <vscale x 2 x i64> %4
 }
 
 define <vscale x 2 x i64> @multiple_use_stepvector_nxv2i64_1(i64 %data) {
 ; CHECK-LABEL: multiple_use_stepvector_nxv2i64_1:
-; CHECK:       // %bb.0: // %entry
+; CHECK:       // %bb.0:
 ; CHECK-NEXT:    index z0.d, #0, #1
 ; CHECK-NEXT:    mov z1.d, x0
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    add z1.d, z0.d, z1.d
 ; CHECK-NEXT:    mul z0.d, p0/m, z0.d, z1.d
 ; CHECK-NEXT:    ret
-entry:
-  %0 = insertelement <vscale x 2 x i64> poison, i64 %data, i32 0
-  %1 = shufflevector <vscale x 2 x i64> %0, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %2 = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
-  %3 = add <vscale x 2 x i64> %1, %2
-  %4 = mul <vscale x 2 x i64> %3, %2
-  ret <vscale x 2 x i64> %4
+  %1 = insertelement <vscale x 2 x i64> poison, i64 %data, i32 0
+  %2 = shufflevector <vscale x 2 x i64> %1, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
+  %3 = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
+  %4 = add <vscale x 2 x i64> %2, %3
+  %5 = mul <vscale x 2 x i64> %4, %3
+  ret <vscale x 2 x i64> %5
 }
 
 define <vscale x 8 x i8> @mul_stepvector_nxv8i8() {
 ; CHECK-LABEL: mul_stepvector_nxv8i8:
-; CHECK:       // %bb.0: // %entry
+; CHECK:       // %bb.0:
 ; CHECK-NEXT:    index z0.h, #0, #2
 ; CHECK-NEXT:    ret
-entry:
-  %0 = insertelement <vscale x 8 x i8> poison, i8 2, i32 0
-  %1 = shufflevector <vscale x 8 x i8> %0, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
   %2 = call <vscale x 8 x i8> @llvm.stepvector.nxv8i8()
-  %3 = mul <vscale x 8 x i8> %2, %1
+  %3 = mul <vscale x 8 x i8> %2, splat(i8 2)
   ret <vscale x 8 x i8> %3
 }
 
 define <vscale x 2 x i64> @mul_stepvector_nxv2i64() {
 ; CHECK-LABEL: mul_stepvector_nxv2i64:
-; CHECK:       // %bb.0: // %entry
+; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #2222 // =0x8ae
 ; CHECK-NEXT:    index z0.d, #0, x8
 ; CHECK-NEXT:    ret
-entry:
-  %0 = insertelement <vscale x 2 x i64> poison, i64 2222, i32 0
-  %1 = shufflevector <vscale x 2 x i64> %0, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
   %2 = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
-  %3 = mul <vscale x 2 x i64> %2, %1
+  %3 = mul <vscale x 2 x i64> %2, splat(i64 2222)
   ret <vscale x 2 x i64> %3
 }
 
 define <vscale x 2 x i64> @mul_stepvector_bigconst_nxv2i64() {
 ; CHECK-LABEL: mul_stepvector_bigconst_nxv2i64:
-; CHECK:       // %bb.0: // %entry
+; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov x8, #146028888064 // =0x2200000000
 ; CHECK-NEXT:    index z0.d, #0, x8
 ; CHECK-NEXT:    ret
-entry:
-  %0 = insertelement <vscale x 2 x i64> poison, i64 146028888064, i32 0
-  %1 = shufflevector <vscale x 2 x i64> %0, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
   %2 = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
-  %3 = mul <vscale x 2 x i64> %2, %1
+  %3 = mul <vscale x 2 x i64> %2, splat(i64 146028888064)
   ret <vscale x 2 x i64> %3
 }
 
 define <vscale x 2 x i64> @mul_add_stepvector_nxv2i64(i64 %x) {
 ; CHECK-LABEL: mul_add_stepvector_nxv2i64:
-; CHECK:       // %bb.0: // %entry
+; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #2222 // =0x8ae
 ; CHECK-NEXT:    index z0.d, x0, x8
 ; CHECK-NEXT:    ret
-entry:
-  %0 = insertelement <vscale x 2 x i64> poison, i64 2222, i32 0
-  %1 = shufflevector <vscale x 2 x i64> %0, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
   %2 = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
-  %3 = mul <vscale x 2 x i64> %2, %1
+  %3 = mul <vscale x 2 x i64> %2, splat(i64 2222)
   %4 = insertelement <vscale x 2 x i64> poison, i64 %x, i32 0
   %5 = shufflevector <vscale x 2 x i64> %4, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
   %6 = add <vscale x 2 x i64> %3, %5
@@ -315,31 +279,27 @@ entry:
 
 define <vscale x 2 x i64> @mul_add_stepvector_nxv2i64_commutative(i64 %x, i64 %y) {
 ; CHECK-LABEL: mul_add_stepvector_nxv2i64_commutative:
-; CHECK:       // %bb.0: // %entry
+; CHECK:       // %bb.0:
 ; CHECK-NEXT:    index z0.d, x0, x1
 ; CHECK-NEXT:    ret
-entry:
-  %0 = insertelement <vscale x 2 x i64> poison, i64 %y, i32 0
-  %1 = shufflevector <vscale x 2 x i64> %0, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %2 = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
-  %3 = mul <vscale x 2 x i64> %1, %2
-  %4 = insertelement <vscale x 2 x i64> poison, i64 %x, i32 0
-  %5 = shufflevector <vscale x 2 x i64> %4, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %6 = add <vscale x 2 x i64> %5, %3
-  ret <vscale x 2 x i64> %6
+  %1 = insertelement <vscale x 2 x i64> poison, i64 %y, i32 0
+  %2 = shufflevector <vscale x 2 x i64> %1, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
+  %3 = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
+  %4 = mul <vscale x 2 x i64> %2, %3
+  %5 = insertelement <vscale x 2 x i64> poison, i64 %x, i32 0
+  %6 = shufflevector <vscale x 2 x i64> %5, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
+  %7 = add <vscale x 2 x i64> %6, %4
+  ret <vscale x 2 x i64> %7
 }
 
 define <vscale x 2 x i64> @mul_add_stepvector_bigconst_nxv2i64(i64 %x) {
 ; CHECK-LABEL: mul_add_stepvector_bigconst_nxv2i64:
-; CHECK:       // %bb.0: // %entry
+; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov x8, #146028888064 // =0x2200000000
 ; CHECK-NEXT:    index z0.d, x0, x8
 ; CHECK-NEXT:    ret
-entry:
-  %0 = insertelement <vscale x 2 x i64> poison, i64 146028888064, i32 0
-  %1 = shufflevector <vscale x 2 x i64> %0, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
   %2 = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
-  %3 = mul <vscale x 2 x i64> %2, %1
+  %3 = mul <vscale x 2 x i64> %2, splat(i64 146028888064)
   %4 = insertelement <vscale x 2 x i64> poison, i64 %x, i32 0
   %5 = shufflevector <vscale x 2 x i64> %4, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
   %6 = add <vscale x 2 x i64> %3, %5
@@ -348,82 +308,69 @@ entry:
 
 define <vscale x 2 x i64> @mul_mul_add_stepvector_nxv2i64(i64 %x, i64 %y) {
 ; CHECK-LABEL: mul_mul_add_stepvector_nxv2i64:
-; CHECK:       // %bb.0: // %entry
+; CHECK:       // %bb.0:
 ; CHECK-NEXT:    add x8, x0, x0, lsl #1
 ; CHECK-NEXT:    index z0.d, x1, x8
 ; CHECK-NEXT:    ret
-entry:
   %xmul = mul i64 %x, 3
-  %0 = insertelement <vscale x 2 x i64> poison, i64 %xmul, i32 0
-  %1 = shufflevector <vscale x 2 x i64> %0, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %2 = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
-  %3 = mul <vscale x 2 x i64> %2, %1
-  %4 = insertelement <vscale x 2 x i64> poison, i64 %y, i32 0
-  %5 = shufflevector <vscale x 2 x i64> %4, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %6 = add <vscale x 2 x i64> %3, %5
-  ret <vscale x 2 x i64> %6
+  %1 = insertelement <vscale x 2 x i64> poison, i64 %xmul, i32 0
+  %2 = shufflevector <vscale x 2 x i64> %1, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
+  %3 = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
+  %4 = mul <vscale x 2 x i64> %3, %2
+  %5 = insertelement <vscale x 2 x i64> poison, i64 %y, i32 0
+  %6 = shufflevector <vscale x 2 x i64> %5, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
+  %7 = add <vscale x 2 x i64> %4, %6
+  ret <vscale x 2 x i64> %7
 }
 
 define <vscale x 8 x i8> @shl_stepvector_nxv8i8() {
 ; CHECK-LABEL: shl_stepvector_nxv8i8:
-; CHECK:       // %bb.0: // %entry
+; CHECK:       // %bb.0:
 ; CHECK-NEXT:    index z0.h, #0, #4
 ; CHECK-NEXT:    ret
-entry:
-  %0 = insertelement <vscale x 8 x i8> poison, i8 2, i32 0
-  %1 = shufflevector <vscale x 8 x i8> %0, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
   %2 = call <vscale x 8 x i8> @llvm.stepvector.nxv8i8()
-  %3 = shl <vscale x 8 x i8> %2, %1
+  %3 = shl <vscale x 8 x i8> %2, splat(i8 2)
   ret <vscale x 8 x i8> %3
 }
 
 define <vscale x 8 x i16> @sub_multiple_use_stepvector_nxv8i16() {
 ; CHECK-LABEL: sub_multiple_use_stepvector_nxv8i16:
-; CHECK:       // %bb.0: // %entry
+; CHECK:       // %bb.0:
 ; CHECK-NEXT:    index z0.h, #0, #1
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    mov z1.d, z0.d
 ; CHECK-NEXT:    subr z1.h, z1.h, #2 // =0x2
 ; CHECK-NEXT:    lsl z0.h, p0/m, z0.h, z1.h
 ; CHECK-NEXT:    ret
-entry:
-  %0 = insertelement <vscale x 8 x i16> poison, i16 2, i32 0
-  %1 = shufflevector <vscale x 8 x i16> %0, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
   %2 = call <vscale x 8 x i16> @llvm.stepvector.nxv8i16()
-  %3 = sub <vscale x 8 x i16> %1, %2
+  %3 = sub <vscale x 8 x i16> splat(i16 2), %2
   %4 = shl <vscale x 8 x i16> %2, %3
   ret <vscale x 8 x i16> %4
 }
 
 define <vscale x 8 x i16> @sub_stepvector_nxv8i16() {
 ; CHECK-LABEL: sub_stepvector_nxv8i16:
-; CHECK:       // %bb.0: // %entry
+; CHECK:       // %bb.0:
 ; CHECK-NEXT:    index z0.h, #2, #-1
 ; CHECK-NEXT:    ret
-entry:
-  %0 = insertelement <vscale x 8 x i16> poison, i16 2, i32 0
-  %1 = shufflevector <vscale x 8 x i16> %0, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
   %2 = call <vscale x 8 x i16> @llvm.stepvector.nxv8i16()
-  %3 = sub <vscale x 8 x i16> %1, %2
+  %3 = sub <vscale x 8 x i16> splat(i16 2), %2
   ret <vscale x 8 x i16> %3
 }
 
 define <vscale x 8 x i8> @promote_sub_stepvector_nxv8i8() {
 ; CHECK-LABEL: promote_sub_stepvector_nxv8i8:
-; CHECK:       // %bb.0: // %entry
+; CHECK:       // %bb.0:
 ; CHECK-NEXT:    index z0.h, #2, #-1
 ; CHECK-NEXT:    ret
-entry:
-  %0 = insertelement <vscale x 8 x i8> poison, i8 2, i32 0
-  %1 = shufflevector <vscale x 8 x i8> %0, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
   %2 = call <vscale x 8 x i8> @llvm.stepvector.nxv8i8()
-  %3 = sub <vscale x 8 x i8> %1, %2
+  %3 = sub <vscale x 8 x i8> splat(i8 2), %2
   ret <vscale x 8 x i8> %3
 }
 
 define <vscale x 16 x i32> @split_sub_stepvector_nxv16i32() {
 ; CHECK-LABEL: split_sub_stepvector_nxv16i32:
-; CHECK:       // %bb.0: // %entry
+; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cntw x8
 ; CHECK-NEXT:    index z0.s, #0, #-1
 ; CHECK-NEXT:    neg x8, x8
@@ -435,10 +382,9 @@ define <vscale x 16 x i32> @split_sub_stepvector_nxv16i32() {
 ; CHECK-NEXT:    add z2.s, z0.s, z3.s
 ; CHECK-NEXT:    add z3.s, z1.s, z3.s
 ; CHECK-NEXT:    ret
-entry:
-  %0 = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
-  %1 = sub <vscale x 16 x i32> zeroinitializer, %0
-  ret <vscale x 16 x i32> %1
+  %1 = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
+  %2 = sub <vscale x 16 x i32> zeroinitializer, %1
+  ret <vscale x 16 x i32> %2
 }
 
 declare <vscale x 2 x i64> @llvm.stepvector.nxv2i64()

diff  --git a/llvm/test/CodeGen/AArch64/sve-vecreduce-fold.ll b/llvm/test/CodeGen/AArch64/sve-vecreduce-fold.ll
index 0bdaefdfc2a3f0..c309ad6ac5cce2 100644
--- a/llvm/test/CodeGen/AArch64/sve-vecreduce-fold.ll
+++ b/llvm/test/CodeGen/AArch64/sve-vecreduce-fold.ll
@@ -56,9 +56,7 @@ define i1 @reduce_and_insert_subvec_into_ones(<vscale x 4 x i1> %in) {
 ; CHECK-NEXT:    nots p0.b, p1/z, p0.b
 ; CHECK-NEXT:    cset w0, eq
 ; CHECK-NEXT:    ret
-  %allones.ins = insertelement <vscale x 16 x i1> poison, i1 1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %allones.ins,  <vscale x 16 x i1> poison,  <vscale x 16 x i32> zeroinitializer
-  %t = call <vscale x 16 x i1> @llvm.vector.insert.nxv16i1.nxv4i1(<vscale x 16 x i1> %allones, <vscale x 4 x i1> %in, i64 0)
+  %t = call <vscale x 16 x i1> @llvm.vector.insert.nxv16i1.nxv4i1(<vscale x 16 x i1> splat(i1 true), <vscale x 4 x i1> %in, i64 0)
   %res = call i1 @llvm.vector.reduce.and.nxv16i1(<vscale x 16 x i1> %t)
   ret i1 %res
 }

diff  --git a/llvm/test/CodeGen/AArch64/sve-vector-splat.ll b/llvm/test/CodeGen/AArch64/sve-vector-splat.ll
index 9253d5ab4531ac..8dd433b6f23c68 100644
--- a/llvm/test/CodeGen/AArch64/sve-vector-splat.ll
+++ b/llvm/test/CodeGen/AArch64/sve-vector-splat.ll
@@ -48,9 +48,7 @@ define <vscale x 16 x i8> @sve_splat_16xi8_imm() {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z0.b, #1 // =0x1
 ; CHECK-NEXT:    ret
-  %ins = insertelement <vscale x 16 x i8> undef, i8 1, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %ins, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
-  ret <vscale x 16 x i8> %splat
+  ret <vscale x 16 x i8> splat(i8 1)
 }
 
 define <vscale x 8 x i16> @sve_splat_8xi16_dup_imm() {
@@ -58,9 +56,7 @@ define <vscale x 8 x i16> @sve_splat_8xi16_dup_imm() {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z0.h, #1 // =0x1
 ; CHECK-NEXT:    ret
-  %ins = insertelement <vscale x 8 x i16> undef, i16 1, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %ins, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  ret <vscale x 8 x i16> %splat
+  ret <vscale x 8 x i16> splat(i16 1)
 }
 
 define <vscale x 8 x i16> @sve_splat_8xi16_dupm_imm() {
@@ -68,9 +64,7 @@ define <vscale x 8 x i16> @sve_splat_8xi16_dupm_imm() {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z0.h, #16256 // =0x3f80
 ; CHECK-NEXT:    ret
-  %ins = insertelement <vscale x 8 x i16> undef, i16 16256, i32 0 ; 0x3f80
-  %splat = shufflevector <vscale x 8 x i16> %ins, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  ret <vscale x 8 x i16> %splat
+  ret <vscale x 8 x i16> splat(i16 16256)
 }
 
 define <vscale x 4 x i32> @sve_splat_4xi32_dup_imm() {
@@ -78,9 +72,7 @@ define <vscale x 4 x i32> @sve_splat_4xi32_dup_imm() {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z0.s, #1 // =0x1
 ; CHECK-NEXT:    ret
-  %ins = insertelement <vscale x 4 x i32> undef, i32 1, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %ins, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  ret <vscale x 4 x i32> %splat
+  ret <vscale x 4 x i32> splat(i32 1)
 }
 
 define <vscale x 4 x i32> @sve_splat_4xi32_dupm_imm() {
@@ -88,9 +80,7 @@ define <vscale x 4 x i32> @sve_splat_4xi32_dupm_imm() {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z0.s, #0xff0000
 ; CHECK-NEXT:    ret
-  %ins = insertelement <vscale x 4 x i32> undef, i32 16711680, i32 0 ; 0xff0000
-  %splat = shufflevector <vscale x 4 x i32> %ins, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  ret <vscale x 4 x i32> %splat
+  ret <vscale x 4 x i32> splat(i32 16711680)
 }
 
 define <vscale x 2 x i64> @sve_splat_2xi64_dup_imm() {
@@ -98,9 +88,7 @@ define <vscale x 2 x i64> @sve_splat_2xi64_dup_imm() {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z0.d, #1 // =0x1
 ; CHECK-NEXT:    ret
-  %ins = insertelement <vscale x 2 x i64> undef, i64 1, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  ret <vscale x 2 x i64> %splat
+  ret <vscale x 2 x i64> splat(i64 1)
 }
 
 define <vscale x 2 x i64> @sve_splat_2xi64_dupm_imm() {
@@ -108,9 +96,7 @@ define <vscale x 2 x i64> @sve_splat_2xi64_dupm_imm() {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z0.d, #0xffff00000000
 ; CHECK-NEXT:    ret
-  %ins = insertelement <vscale x 2 x i64> undef, i64 281470681743360, i32 0 ; 0xffff00000000
-  %splat = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  ret <vscale x 2 x i64> %splat
+  ret <vscale x 2 x i64> splat(i64 281470681743360)
 }
 
 ;; Promote splats of smaller illegal integer vector types
@@ -151,9 +137,7 @@ define <vscale x 8 x i8> @sve_splat_8xi8_imm() {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z0.h, #255 // =0xff
 ; CHECK-NEXT:    ret
-  %ins = insertelement <vscale x 8 x i8> undef, i8 -1, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %ins, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
-  ret <vscale x 8 x i8> %splat
+  ret <vscale x 8 x i8> splat(i8 -1)
 }
 
 define <vscale x 2 x i16> @sve_splat_2xi16(i16 %val) {
@@ -182,9 +166,7 @@ define <vscale x 4 x i16> @sve_splat_4xi16_imm() {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z0.s, #65535 // =0xffff
 ; CHECK-NEXT:    ret
-  %ins = insertelement <vscale x 4 x i16> undef, i16 -1, i32 0
-  %splat = shufflevector <vscale x 4 x i16> %ins, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
-  ret <vscale x 4 x i16> %splat
+  ret <vscale x 4 x i16> splat(i16 -1)
 }
 
 define <vscale x 2 x i32> @sve_splat_2xi32(i32 %val) {
@@ -203,9 +185,7 @@ define <vscale x 2 x i32> @sve_splat_2xi32_imm() {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z0.d, #0xffffffff
 ; CHECK-NEXT:    ret
-  %ins = insertelement <vscale x 2 x i32> undef, i32 -1, i32 0
-  %splat = shufflevector <vscale x 2 x i32> %ins, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
-  ret <vscale x 2 x i32> %splat
+  ret <vscale x 2 x i32> splat(i32 -1)
 }
 
 ;; Widen/split splats of wide vector types.
@@ -459,9 +439,7 @@ define <vscale x 8 x half> @splat_nxv8f16_imm() {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fmov z0.h, #1.00000000
 ; CHECK-NEXT:    ret
-  %1 = insertelement <vscale x 8 x half> undef, half 1.0, i32 0
-  %2 = shufflevector <vscale x 8 x half> %1, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
-  ret <vscale x 8 x half> %2
+  ret <vscale x 8 x half> splat(half 1.0)
 }
 
 define <vscale x 4 x half> @splat_nxv4f16_imm() {
@@ -469,9 +447,7 @@ define <vscale x 4 x half> @splat_nxv4f16_imm() {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fmov z0.h, #1.00000000
 ; CHECK-NEXT:    ret
-  %1 = insertelement <vscale x 4 x half> undef, half 1.0, i32 0
-  %2 = shufflevector <vscale x 4 x half> %1, <vscale x 4 x half> undef, <vscale x 4 x i32> zeroinitializer
-  ret <vscale x 4 x half> %2
+  ret <vscale x 4 x half> splat(half 1.0)
 }
 
 define <vscale x 2 x half> @splat_nxv2f16_imm() {
@@ -479,9 +455,7 @@ define <vscale x 2 x half> @splat_nxv2f16_imm() {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fmov z0.h, #1.00000000
 ; CHECK-NEXT:    ret
-  %1 = insertelement <vscale x 2 x half> undef, half 1.0, i32 0
-  %2 = shufflevector <vscale x 2 x half> %1, <vscale x 2 x half> undef, <vscale x 2 x i32> zeroinitializer
-  ret <vscale x 2 x half> %2
+  ret <vscale x 2 x half> splat(half 1.0)
 }
 
 define <vscale x 4 x float> @splat_nxv4f32_imm() {
@@ -489,9 +463,7 @@ define <vscale x 4 x float> @splat_nxv4f32_imm() {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fmov z0.s, #1.00000000
 ; CHECK-NEXT:    ret
-  %1 = insertelement <vscale x 4 x float> undef, float 1.0, i32 0
-  %2 = shufflevector <vscale x 4 x float> %1, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
-  ret <vscale x 4 x float> %2
+  ret <vscale x 4 x float> splat(float 1.0)
 }
 
 define <vscale x 2 x float> @splat_nxv2f32_imm() {
@@ -499,9 +471,7 @@ define <vscale x 2 x float> @splat_nxv2f32_imm() {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fmov z0.s, #1.00000000
 ; CHECK-NEXT:    ret
-  %1 = insertelement <vscale x 2 x float> undef, float 1.0, i32 0
-  %2 = shufflevector <vscale x 2 x float> %1, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer
-  ret <vscale x 2 x float> %2
+  ret <vscale x 2 x float> splat(float 1.0)
 }
 
 define <vscale x 2 x double> @splat_nxv2f64_imm() {
@@ -509,9 +479,7 @@ define <vscale x 2 x double> @splat_nxv2f64_imm() {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fmov z0.d, #1.00000000
 ; CHECK-NEXT:    ret
-  %1 = insertelement <vscale x 2 x double> undef, double 1.0, i32 0
-  %2 = shufflevector <vscale x 2 x double> %1, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
-  ret <vscale x 2 x double> %2
+  ret <vscale x 2 x double> splat(double 1.0)
 }
 
 define <vscale x 4 x i32> @splat_nxv4i32_fold(<vscale x 4 x i32> %x) {
@@ -539,9 +507,7 @@ define <vscale x 2 x float> @splat_nxv2f32_fmov_fold() {
 ; CHECK-NEXT:    mov w8, #1109917696 // =0x42280000
 ; CHECK-NEXT:    mov z0.s, w8
 ; CHECK-NEXT:    ret
-  %1 = insertelement <vscale x 2 x float> undef, float 4.200000e+01, i32 0
-  %2 = shufflevector <vscale x 2 x float> %1, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer
-  ret <vscale x 2 x float> %2
+  ret <vscale x 2 x float> splat(float 4.200000e+01)
 }
 
 define <vscale x 4 x float> @splat_nxv4f32_fmov_fold() {
@@ -550,9 +516,7 @@ define <vscale x 4 x float> @splat_nxv4f32_fmov_fold() {
 ; CHECK-NEXT:    mov w8, #1109917696 // =0x42280000
 ; CHECK-NEXT:    mov z0.s, w8
 ; CHECK-NEXT:    ret
-  %1 = insertelement <vscale x 4 x float> undef, float 4.200000e+01, i32 0
-  %2 = shufflevector <vscale x 4 x float> %1, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
-  ret <vscale x 4 x float> %2
+  ret <vscale x 4 x float> splat(float 4.200000e+01)
 }
 
 define <vscale x 2 x double> @splat_nxv2f64_fmov_fold() {
@@ -561,9 +525,7 @@ define <vscale x 2 x double> @splat_nxv2f64_fmov_fold() {
 ; CHECK-NEXT:    mov x8, #4631107791820423168 // =0x4045000000000000
 ; CHECK-NEXT:    mov z0.d, x8
 ; CHECK-NEXT:    ret
-  %1 = insertelement <vscale x 2 x double> undef, double 4.200000e+01, i32 0
-  %2 = shufflevector <vscale x 2 x double> %1, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
-  ret <vscale x 2 x double> %2
+  ret <vscale x 2 x double> splat(double 4.200000e+01)
 }
 
 ; Splat of float constants not representable as a single immediate.
@@ -575,9 +537,7 @@ define <vscale x 2 x float> @splat_nxv2f32_imm_out_of_range() {
 ; CHECK-NEXT:    movk w8, #16469, lsl #16
 ; CHECK-NEXT:    mov z0.s, w8
 ; CHECK-NEXT:    ret
-  %1 = insertelement <vscale x 2 x float> undef, float 3.3299999237060546875, i32 0
-  %2 = shufflevector <vscale x 2 x float> %1, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer
-  ret <vscale x 2 x float> %2
+  ret <vscale x 2 x float> splat(float 3.3299999237060546875)
 }
 
 define <vscale x 4 x float> @splat_nxv4f32_imm_out_of_range() {
@@ -587,9 +547,7 @@ define <vscale x 4 x float> @splat_nxv4f32_imm_out_of_range() {
 ; CHECK-NEXT:    movk w8, #16469, lsl #16
 ; CHECK-NEXT:    mov z0.s, w8
 ; CHECK-NEXT:    ret
-  %1 = insertelement <vscale x 4 x float> undef, float 3.3299999237060546875, i32 0
-  %2 = shufflevector <vscale x 4 x float> %1, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
-  ret <vscale x 4 x float> %2
+  ret <vscale x 4 x float> splat(float 3.3299999237060546875)
 }
 
 define <vscale x 2 x double> @splat_nxv2f64_imm_out_of_range() {
@@ -600,9 +558,7 @@ define <vscale x 2 x double> @splat_nxv2f64_imm_out_of_range() {
 ; CHECK-NEXT:    add x8, x8, :lo12:.LCPI57_0
 ; CHECK-NEXT:    ld1rd { z0.d }, p0/z, [x8]
 ; CHECK-NEXT:    ret
-  %1 = insertelement <vscale x 2 x double> undef, double 3.33, i32 0
-  %2 = shufflevector <vscale x 2 x double> %1, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
-  ret <vscale x 2 x double> %2
+  ret <vscale x 2 x double> splat(double 3.33)
 }
 
 ; Splat for predicates
@@ -613,10 +569,7 @@ define <vscale x 2 x i1> @sve_splat_i1_allactive() {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ret
-  %ins = insertelement <vscale x 2 x i1> undef, i1 1, i32 0
-  %splat = shufflevector <vscale x 2 x i1> %ins, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
-  ret <vscale x 2 x i1> %splat
+  ret <vscale x 2 x i1> splat(i1 true)
 }
 
-; +bf16 is required for the bfloat version.
-attributes #0 = { "target-features"="+sve,+bf16" }
+attributes #0 = { "target-features"="+sve" }

diff  --git a/llvm/test/CodeGen/AArch64/sve2-int-mul.ll b/llvm/test/CodeGen/AArch64/sve2-int-mul.ll
index 800888b7e6cb97..2bf3d05eff06a4 100644
--- a/llvm/test/CodeGen/AArch64/sve2-int-mul.ll
+++ b/llvm/test/CodeGen/AArch64/sve2-int-mul.ll
@@ -10,9 +10,7 @@ define <vscale x 8 x i16> @mul_i16_imm(<vscale x 8 x i16> %a) {
 ; CHECK-NEXT:    mov z1.h, #255 // =0xff
 ; CHECK-NEXT:    mul z0.h, z0.h, z1.h
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 255, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %res = mul <vscale x 8 x i16> %a, %splat
+  %res = mul <vscale x 8 x i16> %a, splat(i16 255)
   ret <vscale x 8 x i16> %res
 }
 
@@ -23,9 +21,7 @@ define <vscale x 8 x i16> @mul_i16_imm_neg(<vscale x 8 x i16> %a) {
 ; CHECK-NEXT:    mov z1.h, w8
 ; CHECK-NEXT:    mul z0.h, z0.h, z1.h
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 -200, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %res = mul <vscale x 8 x i16> %a, %splat
+  %res = mul <vscale x 8 x i16> %a, splat(i16 -200)
   ret <vscale x 8 x i16> %res
 }
 
@@ -35,9 +31,7 @@ define <vscale x 4 x i32> @mul_i32_imm(<vscale x 4 x i32> %a) {
 ; CHECK-NEXT:    mov z1.s, #255 // =0xff
 ; CHECK-NEXT:    mul z0.s, z0.s, z1.s
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 255, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %res = mul <vscale x 4 x i32> %a, %splat
+  %res = mul <vscale x 4 x i32> %a, splat(i32 255)
   ret <vscale x 4 x i32> %res
 }
 
@@ -48,9 +42,7 @@ define <vscale x 4 x i32> @mul_i32_imm_neg(<vscale x 4 x i32> %a) {
 ; CHECK-NEXT:    mov z1.s, w8
 ; CHECK-NEXT:    mul z0.s, z0.s, z1.s
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 -200, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %res = mul <vscale x 4 x i32> %a, %splat
+  %res = mul <vscale x 4 x i32> %a, splat(i32 -200)
   ret <vscale x 4 x i32> %res
 }
 
@@ -60,9 +52,7 @@ define <vscale x 2 x i64> @mul_i64_imm(<vscale x 2 x i64> %a) {
 ; CHECK-NEXT:    mov z1.d, #255 // =0xff
 ; CHECK-NEXT:    mul z0.d, z0.d, z1.d
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 255, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %res = mul <vscale x 2 x i64> %a, %splat
+  %res = mul <vscale x 2 x i64> %a, splat(i64 255)
   ret <vscale x 2 x i64> %res
 }
 
@@ -73,51 +63,45 @@ define <vscale x 2 x i64> @mul_i64_imm_neg(<vscale x 2 x i64> %a) {
 ; CHECK-NEXT:    mov z1.d, x8
 ; CHECK-NEXT:    mul z0.d, z0.d, z1.d
 ; CHECK-NEXT:    ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 -200, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %res = mul <vscale x 2 x i64> %a, %splat
+  %res = mul <vscale x 2 x i64> %a, splat(i64 -200)
   ret <vscale x 2 x i64> %res
 }
 
 ;
 ; MUL (vector, unpredicated)
 ;
-define <vscale x 16 x i8> @mul_i8(<vscale x 16 x i8> %a,
+define <vscale x 16 x i8> @mul_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
 ; CHECK-LABEL: mul_i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mul z0.b, z0.b, z1.b
 ; CHECK-NEXT:    ret
-                                  <vscale x 16 x i8> %b) {
   %res = mul <vscale x 16 x i8> %a, %b
   ret <vscale x 16 x i8> %res
 }
 
-define <vscale x 8 x i16> @mul_i16(<vscale x 8 x i16> %a,
+define <vscale x 8 x i16> @mul_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
 ; CHECK-LABEL: mul_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mul z0.h, z0.h, z1.h
 ; CHECK-NEXT:    ret
-                                  <vscale x 8 x i16> %b) {
   %res = mul <vscale x 8 x i16> %a, %b
   ret <vscale x 8 x i16> %res
 }
 
-define <vscale x 4 x i32> @mul_i32(<vscale x 4 x i32> %a,
+define <vscale x 4 x i32> @mul_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: mul_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mul z0.s, z0.s, z1.s
 ; CHECK-NEXT:    ret
-                                  <vscale x 4 x i32> %b) {
   %res = mul <vscale x 4 x i32> %a, %b
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 2 x i64> @mul_i64(<vscale x 2 x i64> %a,
+define <vscale x 2 x i64> @mul_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: mul_i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mul z0.d, z0.d, z1.d
 ; CHECK-NEXT:    ret
-                                  <vscale x 2 x i64> %b) {
   %res = mul <vscale x 2 x i64> %a, %b
   ret <vscale x 2 x i64> %res
 }
@@ -125,48 +109,44 @@ define <vscale x 2 x i64> @mul_i64(<vscale x 2 x i64> %a,
 ;
 ; SMULH (vector, unpredicated)
 ;
-define <vscale x 16 x i8> @smulh_i8(<vscale x 16 x i8> %a,
+define <vscale x 16 x i8> @smulh_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
 ; CHECK-LABEL: smulh_i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    smulh z0.b, z0.b, z1.b
 ; CHECK-NEXT:    ret
-                                    <vscale x 16 x i8> %b) {
   %sel = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
   %res = call <vscale x 16 x i8> @llvm.aarch64.sve.smulh.u.nxv16i8(<vscale x 16 x i1> %sel, <vscale x 16 x i8> %a,
                                                                    <vscale x 16 x i8> %b)
   ret <vscale x 16 x i8> %res
 }
 
-define <vscale x 8 x i16> @smulh_i16(<vscale x 8 x i16> %a,
+define <vscale x 8 x i16> @smulh_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
 ; CHECK-LABEL: smulh_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    smulh z0.h, z0.h, z1.h
 ; CHECK-NEXT:    ret
-                                     <vscale x 8 x i16> %b) {
   %sel = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
   %res = call <vscale x 8 x i16> @llvm.aarch64.sve.smulh.u.nxv8i16(<vscale x 8 x i1> %sel, <vscale x 8 x i16> %a,
                                                                    <vscale x 8 x i16> %b)
   ret <vscale x 8 x i16> %res
 }
 
-define <vscale x 4 x i32> @smulh_i32(<vscale x 4 x i32> %a,
+define <vscale x 4 x i32> @smulh_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: smulh_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    smulh z0.s, z0.s, z1.s
 ; CHECK-NEXT:    ret
-                                     <vscale x 4 x i32> %b) {
   %sel = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
   %res = call <vscale x 4 x i32> @llvm.aarch64.sve.smulh.u.nxv4i32(<vscale x 4 x i1> %sel, <vscale x 4 x i32> %a,
                                                                    <vscale x 4 x i32> %b)
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 2 x i64> @smulh_i64(<vscale x 2 x i64> %a,
+define <vscale x 2 x i64> @smulh_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: smulh_i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    smulh z0.d, z0.d, z1.d
 ; CHECK-NEXT:    ret
-                                     <vscale x 2 x i64> %b) {
   %sel = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
   %res = call <vscale x 2 x i64> @llvm.aarch64.sve.smulh.u.nxv2i64(<vscale x 2 x i1> %sel, <vscale x 2 x i64> %a,
                                                                    <vscale x 2 x i64> %b)
@@ -176,48 +156,44 @@ define <vscale x 2 x i64> @smulh_i64(<vscale x 2 x i64> %a,
 ;
 ; UMULH (vector, unpredicated)
 ;
-define <vscale x 16 x i8> @umulh_i8(<vscale x 16 x i8> %a,
+define <vscale x 16 x i8> @umulh_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
 ; CHECK-LABEL: umulh_i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    umulh z0.b, z0.b, z1.b
 ; CHECK-NEXT:    ret
-                                    <vscale x 16 x i8> %b) {
   %sel = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
   %res = call <vscale x 16 x i8> @llvm.aarch64.sve.umulh.u.nxv16i8(<vscale x 16 x i1> %sel, <vscale x 16 x i8> %a,
                                                                    <vscale x 16 x i8> %b)
   ret <vscale x 16 x i8> %res
 }
 
-define <vscale x 8 x i16> @umulh_i16(<vscale x 8 x i16> %a,
+define <vscale x 8 x i16> @umulh_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
 ; CHECK-LABEL: umulh_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    umulh z0.h, z0.h, z1.h
 ; CHECK-NEXT:    ret
-                                     <vscale x 8 x i16> %b) {
   %sel = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
   %res = call <vscale x 8 x i16> @llvm.aarch64.sve.umulh.u.nxv8i16(<vscale x 8 x i1> %sel, <vscale x 8 x i16> %a,
                                                                    <vscale x 8 x i16> %b)
   ret <vscale x 8 x i16> %res
 }
 
-define <vscale x 4 x i32> @umulh_i32(<vscale x 4 x i32> %a,
+define <vscale x 4 x i32> @umulh_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: umulh_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    umulh z0.s, z0.s, z1.s
 ; CHECK-NEXT:    ret
-                                     <vscale x 4 x i32> %b) {
   %sel = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
   %res = call <vscale x 4 x i32> @llvm.aarch64.sve.umulh.u.nxv4i32(<vscale x 4 x i1> %sel, <vscale x 4 x i32> %a,
                                                                    <vscale x 4 x i32> %b)
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 2 x i64> @umulh_i64(<vscale x 2 x i64> %a,
+define <vscale x 2 x i64> @umulh_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: umulh_i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    umulh z0.d, z0.d, z1.d
 ; CHECK-NEXT:    ret
-                                     <vscale x 2 x i64> %b) {
   %sel = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
   %res = call <vscale x 2 x i64> @llvm.aarch64.sve.umulh.u.nxv2i64(<vscale x 2 x i1> %sel, <vscale x 2 x i64> %a,
                                                                    <vscale x 2 x i64> %b)
@@ -227,12 +203,11 @@ define <vscale x 2 x i64> @umulh_i64(<vscale x 2 x i64> %a,
 ;
 ; PMUL (vector, unpredicated)
 ;
-define <vscale x 16 x i8> @pmul_i8(<vscale x 16 x i8> %a,
+define <vscale x 16 x i8> @pmul_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
 ; CHECK-LABEL: pmul_i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    pmul z0.b, z0.b, z1.b
 ; CHECK-NEXT:    ret
-                                   <vscale x 16 x i8> %b) {
   %res = call <vscale x 16 x i8> @llvm.aarch64.sve.pmul.nxv16i8(<vscale x 16 x i8> %a,
                                                                 <vscale x 16 x i8> %b)
   ret <vscale x 16 x i8> %res
@@ -241,45 +216,41 @@ define <vscale x 16 x i8> @pmul_i8(<vscale x 16 x i8> %a,
 ;
 ; SQDMULH (vector, unpredicated)
 ;
-define <vscale x 16 x i8> @sqdmulh_i8(<vscale x 16 x i8> %a,
+define <vscale x 16 x i8> @sqdmulh_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
 ; CHECK-LABEL: sqdmulh_i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sqdmulh z0.b, z0.b, z1.b
 ; CHECK-NEXT:    ret
-                                      <vscale x 16 x i8> %b) {
   %res = call <vscale x 16 x i8> @llvm.aarch64.sve.sqdmulh.nxv16i8(<vscale x 16 x i8> %a,
                                                                    <vscale x 16 x i8> %b)
   ret <vscale x 16 x i8> %res
 }
 
-define <vscale x 8 x i16> @sqdmulh_i16(<vscale x 8 x i16> %a,
+define <vscale x 8 x i16> @sqdmulh_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
 ; CHECK-LABEL: sqdmulh_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sqdmulh z0.h, z0.h, z1.h
 ; CHECK-NEXT:    ret
-                                       <vscale x 8 x i16> %b) {
   %res = call <vscale x 8 x i16> @llvm.aarch64.sve.sqdmulh.nxv8i16(<vscale x 8 x i16> %a,
                                                                    <vscale x 8 x i16> %b)
   ret <vscale x 8 x i16> %res
 }
 
-define <vscale x 4 x i32> @sqdmulh_i32(<vscale x 4 x i32> %a,
+define <vscale x 4 x i32> @sqdmulh_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: sqdmulh_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sqdmulh z0.s, z0.s, z1.s
 ; CHECK-NEXT:    ret
-                                       <vscale x 4 x i32> %b) {
   %res = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmulh.nxv4i32(<vscale x 4 x i32> %a,
                                                                    <vscale x 4 x i32> %b)
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 2 x i64> @sqdmulh_i64(<vscale x 2 x i64> %a,
+define <vscale x 2 x i64> @sqdmulh_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: sqdmulh_i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sqdmulh z0.d, z0.d, z1.d
 ; CHECK-NEXT:    ret
-                                       <vscale x 2 x i64> %b) {
   %res = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmulh.nxv2i64(<vscale x 2 x i64> %a,
                                                                    <vscale x 2 x i64> %b)
   ret <vscale x 2 x i64> %res
@@ -288,45 +259,41 @@ define <vscale x 2 x i64> @sqdmulh_i64(<vscale x 2 x i64> %a,
 ;
 ; SQRDMULH (vector, unpredicated)
 ;
-define <vscale x 16 x i8> @sqrdmulh_i8(<vscale x 16 x i8> %a,
+define <vscale x 16 x i8> @sqrdmulh_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
 ; CHECK-LABEL: sqrdmulh_i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sqrdmulh z0.b, z0.b, z1.b
 ; CHECK-NEXT:    ret
-                                       <vscale x 16 x i8> %b) {
   %res = call <vscale x 16 x i8> @llvm.aarch64.sve.sqrdmulh.nxv16i8(<vscale x 16 x i8> %a,
                                                                     <vscale x 16 x i8> %b)
   ret <vscale x 16 x i8> %res
 }
 
-define <vscale x 8 x i16> @sqrdmulh_i16(<vscale x 8 x i16> %a,
+define <vscale x 8 x i16> @sqrdmulh_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
 ; CHECK-LABEL: sqrdmulh_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sqrdmulh z0.h, z0.h, z1.h
 ; CHECK-NEXT:    ret
-                                        <vscale x 8 x i16> %b) {
   %res = call <vscale x 8 x i16> @llvm.aarch64.sve.sqrdmulh.nxv8i16(<vscale x 8 x i16> %a,
                                                                     <vscale x 8 x i16> %b)
   ret <vscale x 8 x i16> %res
 }
 
-define <vscale x 4 x i32> @sqrdmulh_i32(<vscale x 4 x i32> %a,
+define <vscale x 4 x i32> @sqrdmulh_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: sqrdmulh_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sqrdmulh z0.s, z0.s, z1.s
 ; CHECK-NEXT:    ret
-                                        <vscale x 4 x i32> %b) {
   %res = call <vscale x 4 x i32> @llvm.aarch64.sve.sqrdmulh.nxv4i32(<vscale x 4 x i32> %a,
                                                                     <vscale x 4 x i32> %b)
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 2 x i64> @sqrdmulh_i64(<vscale x 2 x i64> %a,
+define <vscale x 2 x i64> @sqrdmulh_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: sqrdmulh_i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sqrdmulh z0.d, z0.d, z1.d
 ; CHECK-NEXT:    ret
-                                        <vscale x 2 x i64> %b) {
   %res = call <vscale x 2 x i64> @llvm.aarch64.sve.sqrdmulh.nxv2i64(<vscale x 2 x i64> %a,
                                                                     <vscale x 2 x i64> %b)
   ret <vscale x 2 x i64> %res

diff  --git a/llvm/test/CodeGen/AArch64/sve2-int-mulh.ll b/llvm/test/CodeGen/AArch64/sve2-int-mulh.ll
index c929a3faffabf6..bcf76d5b13d62a 100644
--- a/llvm/test/CodeGen/AArch64/sve2-int-mulh.ll
+++ b/llvm/test/CodeGen/AArch64/sve2-int-mulh.ll
@@ -10,12 +10,10 @@ define <vscale x 16 x i8> @smulh_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    smulh z0.b, z0.b, z1.b
 ; CHECK-NEXT:    ret
-  %insert = insertelement <vscale x 16 x i16> undef, i16 8, i64 0
-  %splat = shufflevector <vscale x 16 x i16> %insert, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
   %1 = sext <vscale x 16 x i8> %a to <vscale x 16 x i16>
   %2 = sext <vscale x 16 x i8> %b to <vscale x 16 x i16>
   %mul = mul <vscale x 16 x i16> %1, %2
-  %shr = lshr <vscale x 16 x i16> %mul, %splat
+  %shr = lshr <vscale x 16 x i16> %mul, splat(i16 8)
   %tr = trunc <vscale x 16 x i16> %shr to <vscale x 16 x i8>
   ret <vscale x 16 x i8> %tr
 }
@@ -25,12 +23,10 @@ define <vscale x 8 x i16> @smulh_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    smulh z0.h, z0.h, z1.h
 ; CHECK-NEXT:    ret
-  %insert = insertelement <vscale x 8 x i32> undef, i32 16, i64 0
-  %splat = shufflevector <vscale x 8 x i32> %insert, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
   %1 = sext <vscale x 8 x i16> %a to <vscale x 8 x i32>
   %2 = sext <vscale x 8 x i16> %b to <vscale x 8 x i32>
   %mul = mul <vscale x 8 x i32> %1, %2
-  %shr = lshr <vscale x 8 x i32> %mul, %splat
+  %shr = lshr <vscale x 8 x i32> %mul, splat(i32 16)
   %tr = trunc <vscale x 8 x i32> %shr to <vscale x 8 x i16>
   ret <vscale x 8 x i16> %tr
 }
@@ -40,12 +36,10 @@ define <vscale x 4 x i32> @smulh_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    smulh z0.s, z0.s, z1.s
 ; CHECK-NEXT:    ret
-  %insert = insertelement <vscale x 4 x i64> undef, i64 32, i64 0
-  %splat = shufflevector <vscale x 4 x i64> %insert, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
   %1 = sext <vscale x 4 x i32> %a to <vscale x 4 x i64>
   %2 = sext <vscale x 4 x i32> %b to <vscale x 4 x i64>
   %mul = mul <vscale x 4 x i64> %1, %2
-  %shr = lshr <vscale x 4 x i64> %mul, %splat
+  %shr = lshr <vscale x 4 x i64> %mul, splat(i64 32)
   %tr = trunc <vscale x 4 x i64> %shr to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %tr
 }
@@ -55,12 +49,10 @@ define <vscale x 2 x i64> @smulh_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    smulh z0.d, z0.d, z1.d
 ; CHECK-NEXT:    ret
-  %insert = insertelement <vscale x 2 x i128> undef, i128 64, i64 0
-  %splat = shufflevector <vscale x 2 x i128> %insert, <vscale x 2 x i128> undef, <vscale x 2 x i32> zeroinitializer
   %1 = sext <vscale x 2 x i64> %a to <vscale x 2 x i128>
   %2 = sext <vscale x 2 x i64> %b to <vscale x 2 x i128>
   %mul = mul <vscale x 2 x i128> %1, %2
-  %shr = lshr <vscale x 2 x i128> %mul, %splat
+  %shr = lshr <vscale x 2 x i128> %mul, splat(i128 64)
   %tr = trunc <vscale x 2 x i128> %shr to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %tr
 }
@@ -74,12 +66,10 @@ define <vscale x 16 x i8> @umulh_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    umulh z0.b, z0.b, z1.b
 ; CHECK-NEXT:    ret
-  %insert = insertelement <vscale x 16 x i16> undef, i16 8, i64 0
-  %splat = shufflevector <vscale x 16 x i16> %insert, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
   %1 = zext <vscale x 16 x i8> %a to <vscale x 16 x i16>
   %2 = zext <vscale x 16 x i8> %b to <vscale x 16 x i16>
   %mul = mul <vscale x 16 x i16> %1, %2
-  %shr = lshr <vscale x 16 x i16> %mul, %splat
+  %shr = lshr <vscale x 16 x i16> %mul, splat(i16 8)
   %tr = trunc <vscale x 16 x i16> %shr to <vscale x 16 x i8>
   ret <vscale x 16 x i8> %tr
 }
@@ -89,12 +79,10 @@ define <vscale x 8 x i16> @umulh_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    umulh z0.h, z0.h, z1.h
 ; CHECK-NEXT:    ret
-  %insert = insertelement <vscale x 8 x i32> undef, i32 16, i64 0
-  %splat = shufflevector <vscale x 8 x i32> %insert, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
   %1 = zext <vscale x 8 x i16> %a to <vscale x 8 x i32>
   %2 = zext <vscale x 8 x i16> %b to <vscale x 8 x i32>
   %mul = mul <vscale x 8 x i32> %1, %2
-  %shr = lshr <vscale x 8 x i32> %mul, %splat
+  %shr = lshr <vscale x 8 x i32> %mul, splat(i32 16)
   %tr = trunc <vscale x 8 x i32> %shr to <vscale x 8 x i16>
   ret <vscale x 8 x i16> %tr
 }
@@ -104,12 +92,10 @@ define <vscale x 4 x i32> @umulh_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    umulh z0.s, z0.s, z1.s
 ; CHECK-NEXT:    ret
-  %insert = insertelement <vscale x 4 x i64> undef, i64 32, i64 0
-  %splat = shufflevector <vscale x 4 x i64> %insert, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
   %1 = zext <vscale x 4 x i32> %a to <vscale x 4 x i64>
   %2 = zext <vscale x 4 x i32> %b to <vscale x 4 x i64>
   %mul = mul <vscale x 4 x i64> %1, %2
-  %shr = lshr <vscale x 4 x i64> %mul, %splat
+  %shr = lshr <vscale x 4 x i64> %mul, splat(i64 32)
   %tr = trunc <vscale x 4 x i64> %shr to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %tr
 }
@@ -119,12 +105,10 @@ define <vscale x 2 x i64> @umulh_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    umulh z0.d, z0.d, z1.d
 ; CHECK-NEXT:    ret
-  %insert = insertelement <vscale x 2 x i128> undef, i128 64, i64 0
-  %splat = shufflevector <vscale x 2 x i128> %insert, <vscale x 2 x i128> undef, <vscale x 2 x i32> zeroinitializer
   %1 = zext <vscale x 2 x i64> %a to <vscale x 2 x i128>
   %2 = zext <vscale x 2 x i64> %b to <vscale x 2 x i128>
   %mul = mul <vscale x 2 x i128> %1, %2
-  %shr = lshr <vscale x 2 x i128> %mul, %splat
+  %shr = lshr <vscale x 2 x i128> %mul, splat(i128 64)
   %tr = trunc <vscale x 2 x i128> %shr to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %tr
 }

diff  --git a/llvm/test/CodeGen/AArch64/sve2-intrinsics-int-arith-imm.ll b/llvm/test/CodeGen/AArch64/sve2-intrinsics-int-arith-imm.ll
index 500973d053f5b8..20fdd7f0bba4e2 100644
--- a/llvm/test/CodeGen/AArch64/sve2-intrinsics-int-arith-imm.ll
+++ b/llvm/test/CodeGen/AArch64/sve2-intrinsics-int-arith-imm.ll
@@ -9,11 +9,9 @@ define <vscale x 16 x i8> @sqadd_b_lowimm(<vscale x 16 x i8> %a) {
 ; CHECK-NEXT:    sqadd z0.b, z0.b, #27 // =0x1b
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
-  %elt = insertelement <vscale x 16 x i8> undef, i8 27, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
   %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sqadd.nxv16i8(<vscale x 16 x i1> %pg,
                                                                  <vscale x 16 x i8> %a,
-                                                                 <vscale x 16 x i8> %splat)
+                                                                 <vscale x 16 x i8> splat(i8 27))
   ret <vscale x 16 x i8> %out
 }
 
@@ -23,11 +21,9 @@ define <vscale x 8 x i16> @sqadd_h_lowimm(<vscale x 8 x i16> %a) {
 ; CHECK-NEXT:    sqadd z0.h, z0.h, #43 // =0x2b
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
-  %elt = insertelement <vscale x 8 x i16> undef, i16 43, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqadd.nxv8i16(<vscale x 8 x i1> %pg,
                                                                  <vscale x 8 x i16> %a,
-                                                                 <vscale x 8 x i16> %splat)
+                                                                 <vscale x 8 x i16> splat(i16 43))
   ret <vscale x 8 x i16> %out
 }
 
@@ -37,11 +33,9 @@ define <vscale x 8 x i16> @sqadd_h_highimm(<vscale x 8 x i16> %a) {
 ; CHECK-NEXT:    sqadd z0.h, z0.h, #2048 // =0x800
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
-  %elt = insertelement <vscale x 8 x i16> undef, i16 2048, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqadd.nxv8i16(<vscale x 8 x i1> %pg,
                                                                  <vscale x 8 x i16> %a,
-                                                                 <vscale x 8 x i16> %splat)
+                                                                 <vscale x 8 x i16> splat(i16 2048))
   ret <vscale x 8 x i16> %out
 }
 
@@ -51,11 +45,9 @@ define <vscale x 4 x i32> @sqadd_s_lowimm(<vscale x 4 x i32> %a) {
 ; CHECK-NEXT:    sqadd z0.s, z0.s, #1 // =0x1
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
-  %elt = insertelement <vscale x 4 x i32> undef, i32 1, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqadd.nxv4i32(<vscale x 4 x i1> %pg,
                                                                  <vscale x 4 x i32> %a,
-                                                                 <vscale x 4 x i32> %splat)
+                                                                 <vscale x 4 x i32> splat(i32 1))
   ret <vscale x 4 x i32> %out
 }
 
@@ -65,11 +57,9 @@ define <vscale x 4 x i32> @sqadd_s_highimm(<vscale x 4 x i32> %a) {
 ; CHECK-NEXT:    sqadd z0.s, z0.s, #8192 // =0x2000
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
-  %elt = insertelement <vscale x 4 x i32> undef, i32 8192, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqadd.nxv4i32(<vscale x 4 x i1> %pg,
                                                                  <vscale x 4 x i32> %a,
-                                                                 <vscale x 4 x i32> %splat)
+                                                                 <vscale x 4 x i32> splat(i32 8192))
   ret <vscale x 4 x i32> %out
 }
 
@@ -79,11 +69,9 @@ define <vscale x 2 x i64> @sqadd_d_lowimm(<vscale x 2 x i64> %a) {
 ; CHECK-NEXT:    sqadd z0.d, z0.d, #255 // =0xff
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
-  %elt = insertelement <vscale x 2 x i64> undef, i64 255, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sqadd.nxv2i64(<vscale x 2 x i1> %pg,
                                                                  <vscale x 2 x i64> %a,
-                                                                 <vscale x 2 x i64> %splat)
+                                                                 <vscale x 2 x i64> splat(i64 255))
   ret <vscale x 2 x i64> %out
 }
 
@@ -93,11 +81,9 @@ define <vscale x 2 x i64> @sqadd_d_highimm(<vscale x 2 x i64> %a) {
 ; CHECK-NEXT:    sqadd z0.d, z0.d, #65280 // =0xff00
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
-  %elt = insertelement <vscale x 2 x i64> undef, i64 65280, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sqadd.nxv2i64(<vscale x 2 x i1> %pg,
                                                                  <vscale x 2 x i64> %a,
-                                                                 <vscale x 2 x i64> %splat)
+                                                                 <vscale x 2 x i64> splat(i64 65280))
   ret <vscale x 2 x i64> %out
 }
 
@@ -109,11 +95,9 @@ define <vscale x 16 x i8> @sqsub_b_lowimm(<vscale x 16 x i8> %a) {
 ; CHECK-NEXT:    sqsub z0.b, z0.b, #27 // =0x1b
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
-  %elt = insertelement <vscale x 16 x i8> undef, i8 27, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
   %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sqsub.u.nxv16i8(<vscale x 16 x i1> %pg,
                                                                    <vscale x 16 x i8> %a,
-                                                                   <vscale x 16 x i8> %splat)
+                                                                   <vscale x 16 x i8> splat(i8 27))
   ret <vscale x 16 x i8> %out
 }
 
@@ -123,11 +107,9 @@ define <vscale x 8 x i16> @sqsub_h_lowimm(<vscale x 8 x i16> %a) {
 ; CHECK-NEXT:    sqsub z0.h, z0.h, #43 // =0x2b
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
-  %elt = insertelement <vscale x 8 x i16> undef, i16 43, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.u.nxv8i16(<vscale x 8 x i1> %pg,
                                                                    <vscale x 8 x i16> %a,
-                                                                   <vscale x 8 x i16> %splat)
+                                                                   <vscale x 8 x i16> splat(i16 43))
   ret <vscale x 8 x i16> %out
 }
 
@@ -137,11 +119,9 @@ define <vscale x 8 x i16> @sqsub_h_highimm(<vscale x 8 x i16> %a) {
 ; CHECK-NEXT:    sqsub z0.h, z0.h, #2048 // =0x800
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
-  %elt = insertelement <vscale x 8 x i16> undef, i16 2048, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.u.nxv8i16(<vscale x 8 x i1> %pg,
                                                                    <vscale x 8 x i16> %a,
-                                                                   <vscale x 8 x i16> %splat)
+                                                                   <vscale x 8 x i16> splat(i16 2048))
   ret <vscale x 8 x i16> %out
 }
 
@@ -151,11 +131,9 @@ define <vscale x 4 x i32> @sqsub_s_lowimm(<vscale x 4 x i32> %a) {
 ; CHECK-NEXT:    sqsub z0.s, z0.s, #1 // =0x1
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
-  %elt = insertelement <vscale x 4 x i32> undef, i32 1, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.u.nxv4i32(<vscale x 4 x i1> %pg,
                                                                    <vscale x 4 x i32> %a,
-                                                                   <vscale x 4 x i32> %splat)
+                                                                   <vscale x 4 x i32> splat(i32 1))
   ret <vscale x 4 x i32> %out
 }
 
@@ -165,11 +143,9 @@ define <vscale x 4 x i32> @sqsub_s_highimm(<vscale x 4 x i32> %a) {
 ; CHECK-NEXT:    sqsub z0.s, z0.s, #8192 // =0x2000
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
-  %elt = insertelement <vscale x 4 x i32> undef, i32 8192, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.u.nxv4i32(<vscale x 4 x i1> %pg,
                                                                    <vscale x 4 x i32> %a,
-                                                                   <vscale x 4 x i32> %splat)
+                                                                   <vscale x 4 x i32> splat(i32 8192))
   ret <vscale x 4 x i32> %out
 }
 
@@ -179,11 +155,9 @@ define <vscale x 2 x i64> @sqsub_d_lowimm(<vscale x 2 x i64> %a) {
 ; CHECK-NEXT:    sqsub z0.d, z0.d, #255 // =0xff
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
-  %elt = insertelement <vscale x 2 x i64> undef, i64 255, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.u.nxv2i64(<vscale x 2 x i1> %pg,
                                                                    <vscale x 2 x i64> %a,
-                                                                   <vscale x 2 x i64> %splat)
+                                                                   <vscale x 2 x i64> splat(i64 255))
   ret <vscale x 2 x i64> %out
 }
 
@@ -193,11 +167,9 @@ define <vscale x 2 x i64> @sqsub_d_highimm(<vscale x 2 x i64> %a) {
 ; CHECK-NEXT:    sqsub z0.d, z0.d, #65280 // =0xff00
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
-  %elt = insertelement <vscale x 2 x i64> undef, i64 65280, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.u.nxv2i64(<vscale x 2 x i1> %pg,
                                                                    <vscale x 2 x i64> %a,
-                                                                   <vscale x 2 x i64> %splat)
+                                                                   <vscale x 2 x i64> splat(i64 65280))
   ret <vscale x 2 x i64> %out
 }
 
@@ -209,11 +181,9 @@ define <vscale x 16 x i8> @uqadd_b_lowimm(<vscale x 16 x i8> %a) {
 ; CHECK-NEXT:    uqadd z0.b, z0.b, #27 // =0x1b
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
-  %elt = insertelement <vscale x 16 x i8> undef, i8 27, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
   %out = call <vscale x 16 x i8> @llvm.aarch64.sve.uqadd.nxv16i8(<vscale x 16 x i1> %pg,
                                                                  <vscale x 16 x i8> %a,
-                                                                 <vscale x 16 x i8> %splat)
+                                                                 <vscale x 16 x i8> splat(i8 27))
   ret <vscale x 16 x i8> %out
 }
 
@@ -223,11 +193,9 @@ define <vscale x 8 x i16> @uqadd_h_lowimm(<vscale x 8 x i16> %a) {
 ; CHECK-NEXT:    uqadd z0.h, z0.h, #43 // =0x2b
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
-  %elt = insertelement <vscale x 8 x i16> undef, i16 43, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqadd.nxv8i16(<vscale x 8 x i1> %pg,
                                                                  <vscale x 8 x i16> %a,
-                                                                 <vscale x 8 x i16> %splat)
+                                                                 <vscale x 8 x i16> splat(i16 43))
   ret <vscale x 8 x i16> %out
 }
 
@@ -237,11 +205,9 @@ define <vscale x 8 x i16> @uqadd_h_highimm(<vscale x 8 x i16> %a) {
 ; CHECK-NEXT:    uqadd z0.h, z0.h, #2048 // =0x800
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
-  %elt = insertelement <vscale x 8 x i16> undef, i16 2048, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqadd.nxv8i16(<vscale x 8 x i1> %pg,
                                                                  <vscale x 8 x i16> %a,
-                                                                 <vscale x 8 x i16> %splat)
+                                                                 <vscale x 8 x i16> splat(i16 2048))
   ret <vscale x 8 x i16> %out
 }
 
@@ -251,11 +217,9 @@ define <vscale x 4 x i32> @uqadd_s_lowimm(<vscale x 4 x i32> %a) {
 ; CHECK-NEXT:    uqadd z0.s, z0.s, #1 // =0x1
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
-  %elt = insertelement <vscale x 4 x i32> undef, i32 1, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqadd.nxv4i32(<vscale x 4 x i1> %pg,
                                                                  <vscale x 4 x i32> %a,
-                                                                 <vscale x 4 x i32> %splat)
+                                                                 <vscale x 4 x i32> splat(i32 1))
   ret <vscale x 4 x i32> %out
 }
 
@@ -265,11 +229,9 @@ define <vscale x 4 x i32> @uqadd_s_highimm(<vscale x 4 x i32> %a) {
 ; CHECK-NEXT:    uqadd z0.s, z0.s, #8192 // =0x2000
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
-  %elt = insertelement <vscale x 4 x i32> undef, i32 8192, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqadd.nxv4i32(<vscale x 4 x i1> %pg,
                                                                  <vscale x 4 x i32> %a,
-                                                                 <vscale x 4 x i32> %splat)
+                                                                 <vscale x 4 x i32> splat(i32 8192))
   ret <vscale x 4 x i32> %out
 }
 
@@ -279,11 +241,9 @@ define <vscale x 2 x i64> @uqadd_d_lowimm(<vscale x 2 x i64> %a) {
 ; CHECK-NEXT:    uqadd z0.d, z0.d, #255 // =0xff
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
-  %elt = insertelement <vscale x 2 x i64> undef, i64 255, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uqadd.nxv2i64(<vscale x 2 x i1> %pg,
                                                                  <vscale x 2 x i64> %a,
-                                                                 <vscale x 2 x i64> %splat)
+                                                                 <vscale x 2 x i64> splat(i64 255))
   ret <vscale x 2 x i64> %out
 }
 
@@ -293,11 +253,9 @@ define <vscale x 2 x i64> @uqadd_d_highimm(<vscale x 2 x i64> %a) {
 ; CHECK-NEXT:    uqadd z0.d, z0.d, #65280 // =0xff00
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
-  %elt = insertelement <vscale x 2 x i64> undef, i64 65280, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uqadd.nxv2i64(<vscale x 2 x i1> %pg,
                                                                  <vscale x 2 x i64> %a,
-                                                                 <vscale x 2 x i64> %splat)
+                                                                 <vscale x 2 x i64> splat(i64 65280))
   ret <vscale x 2 x i64> %out
 }
 
@@ -309,11 +267,9 @@ define <vscale x 16 x i8> @uqsub_b_lowimm(<vscale x 16 x i8> %a) {
 ; CHECK-NEXT:    uqsub z0.b, z0.b, #27 // =0x1b
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
-  %elt = insertelement <vscale x 16 x i8> undef, i8 27, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
   %out = call <vscale x 16 x i8> @llvm.aarch64.sve.uqsub.u.nxv16i8(<vscale x 16 x i1> %pg,
                                                                    <vscale x 16 x i8> %a,
-                                                                   <vscale x 16 x i8> %splat)
+                                                                   <vscale x 16 x i8> splat(i8 27))
   ret <vscale x 16 x i8> %out
 }
 
@@ -323,11 +279,9 @@ define <vscale x 8 x i16> @uqsub_h_lowimm(<vscale x 8 x i16> %a) {
 ; CHECK-NEXT:    uqsub z0.h, z0.h, #43 // =0x2b
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
-  %elt = insertelement <vscale x 8 x i16> undef, i16 43, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.u.nxv8i16(<vscale x 8 x i1> %pg,
                                                                    <vscale x 8 x i16> %a,
-                                                                   <vscale x 8 x i16> %splat)
+                                                                   <vscale x 8 x i16> splat(i16 43))
   ret <vscale x 8 x i16> %out
 }
 
@@ -337,11 +291,9 @@ define <vscale x 8 x i16> @uqsub_h_highimm(<vscale x 8 x i16> %a) {
 ; CHECK-NEXT:    uqsub z0.h, z0.h, #2048 // =0x800
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
-  %elt = insertelement <vscale x 8 x i16> undef, i16 2048, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.u.nxv8i16(<vscale x 8 x i1> %pg,
                                                                    <vscale x 8 x i16> %a,
-                                                                   <vscale x 8 x i16> %splat)
+                                                                   <vscale x 8 x i16> splat(i16 2048))
   ret <vscale x 8 x i16> %out
 }
 
@@ -351,11 +303,9 @@ define <vscale x 4 x i32> @uqsub_s_lowimm(<vscale x 4 x i32> %a) {
 ; CHECK-NEXT:    uqsub z0.s, z0.s, #1 // =0x1
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
-  %elt = insertelement <vscale x 4 x i32> undef, i32 1, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.u.nxv4i32(<vscale x 4 x i1> %pg,
                                                                    <vscale x 4 x i32> %a,
-                                                                   <vscale x 4 x i32> %splat)
+                                                                   <vscale x 4 x i32> splat(i32 1))
   ret <vscale x 4 x i32> %out
 }
 
@@ -365,11 +315,9 @@ define <vscale x 4 x i32> @uqsub_s_highimm(<vscale x 4 x i32> %a) {
 ; CHECK-NEXT:    uqsub z0.s, z0.s, #8192 // =0x2000
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
-  %elt = insertelement <vscale x 4 x i32> undef, i32 8192, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.u.nxv4i32(<vscale x 4 x i1> %pg,
                                                                    <vscale x 4 x i32> %a,
-                                                                   <vscale x 4 x i32> %splat)
+                                                                   <vscale x 4 x i32> splat(i32 8192))
   ret <vscale x 4 x i32> %out
 }
 
@@ -379,11 +327,9 @@ define <vscale x 2 x i64> @uqsub_d_lowimm(<vscale x 2 x i64> %a) {
 ; CHECK-NEXT:    uqsub z0.d, z0.d, #255 // =0xff
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
-  %elt = insertelement <vscale x 2 x i64> undef, i64 255, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.u.nxv2i64(<vscale x 2 x i1> %pg,
                                                                    <vscale x 2 x i64> %a,
-                                                                   <vscale x 2 x i64> %splat)
+                                                                   <vscale x 2 x i64> splat(i64 255))
   ret <vscale x 2 x i64> %out
 }
 
@@ -393,11 +339,9 @@ define <vscale x 2 x i64> @uqsub_d_highimm(<vscale x 2 x i64> %a) {
 ; CHECK-NEXT:    uqsub z0.d, z0.d, #65280 // =0xff00
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
-  %elt = insertelement <vscale x 2 x i64> undef, i64 65280, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.u.nxv2i64(<vscale x 2 x i1> %pg,
                                                                    <vscale x 2 x i64> %a,
-                                                                   <vscale x 2 x i64> %splat)
+                                                                   <vscale x 2 x i64> splat(i64 65280))
   ret <vscale x 2 x i64> %out
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/sve2-sra.ll b/llvm/test/CodeGen/AArch64/sve2-sra.ll
index 3de49c9b8768b9..eafcd60bc16054 100644
--- a/llvm/test/CodeGen/AArch64/sve2-sra.ll
+++ b/llvm/test/CodeGen/AArch64/sve2-sra.ll
@@ -10,9 +10,7 @@ define <vscale x 16 x i8> @usra_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    usra z0.b, z1.b, #1
 ; CHECK-NEXT:    ret
-  %ins = insertelement <vscale x 16 x i8> poison, i8 1, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %ins, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %shift = lshr <vscale x 16 x i8> %b, %splat
+  %shift = lshr <vscale x 16 x i8> %b, splat(i8 1)
   %add = add <vscale x 16 x i8> %a, %shift
   ret <vscale x 16 x i8> %add
 }
@@ -22,9 +20,7 @@ define <vscale x 8 x i16> @usra_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    usra z0.h, z1.h, #2
 ; CHECK-NEXT:    ret
-  %ins = insertelement <vscale x 8 x i16> poison, i16 2, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %ins, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %shift = lshr <vscale x 8 x i16> %b, %splat
+  %shift = lshr <vscale x 8 x i16> %b, splat(i16 2)
   %add = add <vscale x 8 x i16> %a, %shift
   ret <vscale x 8 x i16> %add
 }
@@ -34,9 +30,7 @@ define <vscale x 4 x i32> @usra_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    usra z0.s, z1.s, #3
 ; CHECK-NEXT:    ret
-  %ins = insertelement <vscale x 4 x i32> poison, i32 3, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %ins, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %shift = lshr <vscale x 4 x i32> %b, %splat
+  %shift = lshr <vscale x 4 x i32> %b, splat(i32 3)
   %add = add <vscale x 4 x i32> %a, %shift
   ret <vscale x 4 x i32> %add
 }
@@ -46,9 +40,7 @@ define <vscale x 2 x i64> @usra_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    usra z0.d, z1.d, #4
 ; CHECK-NEXT:    ret
-  %ins = insertelement <vscale x 2 x i64> poison, i64 4, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %shift = lshr <vscale x 2 x i64> %b, %splat
+  %shift = lshr <vscale x 2 x i64> %b, splat(i64 4)
   %add = add <vscale x 2 x i64> %a, %shift
   ret <vscale x 2 x i64> %add
 }
@@ -59,9 +51,7 @@ define <vscale x 16 x i8> @usra_intr_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8
 ; CHECK-NEXT:    usra z0.b, z1.b, #1
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
-  %ins = insertelement <vscale x 16 x i8> poison, i8 1, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %ins, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %shift = call <vscale x 16 x i8> @llvm.aarch64.sve.lsr.u.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %b, <vscale x 16 x i8> %splat)
+  %shift = call <vscale x 16 x i8> @llvm.aarch64.sve.lsr.u.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %b, <vscale x 16 x i8> splat(i8 1))
   %add = add <vscale x 16 x i8> %a, %shift
   ret <vscale x 16 x i8> %add
 }
@@ -72,9 +62,7 @@ define <vscale x 8 x i16> @usra_intr_i16(<vscale x 8 x i16> %a, <vscale x 8 x i1
 ; CHECK-NEXT:    usra z0.h, z1.h, #2
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
-  %ins = insertelement <vscale x 8 x i16> poison, i16 2, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %ins, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %shift = call <vscale x 8 x i16> @llvm.aarch64.sve.lsr.u.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %b, <vscale x 8 x i16> %splat)
+  %shift = call <vscale x 8 x i16> @llvm.aarch64.sve.lsr.u.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %b, <vscale x 8 x i16> splat(i16 2))
   %add = add <vscale x 8 x i16> %a, %shift
   ret <vscale x 8 x i16> %add
 }
@@ -85,9 +73,7 @@ define <vscale x 4 x i32> @usra_intr_i32(<vscale x 4 x i32> %a, <vscale x 4 x i3
 ; CHECK-NEXT:    usra z0.s, z1.s, #3
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
-  %ins = insertelement <vscale x 4 x i32> poison, i32 3, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %ins, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %shift = call <vscale x 4 x i32> @llvm.aarch64.sve.lsr.u.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %b, <vscale x 4 x i32> %splat)
+  %shift = call <vscale x 4 x i32> @llvm.aarch64.sve.lsr.u.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %b, <vscale x 4 x i32> splat(i32 3))
   %add = add <vscale x 4 x i32> %a, %shift
   ret <vscale x 4 x i32> %add
 }
@@ -98,9 +84,7 @@ define <vscale x 2 x i64> @usra_intr_i64(<vscale x 2 x i64> %a, <vscale x 2 x i6
 ; CHECK-NEXT:    usra z0.d, z1.d, #4
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
-  %ins = insertelement <vscale x 2 x i64> poison, i64 4, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %shift = call <vscale x 2 x i64> @llvm.aarch64.sve.lsr.u.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %b, <vscale x 2 x i64> %splat)
+  %shift = call <vscale x 2 x i64> @llvm.aarch64.sve.lsr.u.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %b, <vscale x 2 x i64> splat(i64 4))
   %add = add <vscale x 2 x i64> %a, %shift
   ret <vscale x 2 x i64> %add
 }
@@ -110,9 +94,7 @@ define <vscale x 16 x i8> @usra_intr_u_i8(<vscale x 16 x i1> %pg, <vscale x 16 x
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    usra z0.b, z1.b, #1
 ; CHECK-NEXT:    ret
-  %ins = insertelement <vscale x 16 x i8> poison, i8 1, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %ins, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %shift = call <vscale x 16 x i8> @llvm.aarch64.sve.lsr.u.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %b, <vscale x 16 x i8> %splat)
+  %shift = call <vscale x 16 x i8> @llvm.aarch64.sve.lsr.u.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %b, <vscale x 16 x i8> splat(i8 1))
   %add = add <vscale x 16 x i8> %a, %shift
   ret <vscale x 16 x i8> %add
 }
@@ -122,9 +104,7 @@ define <vscale x 8 x i16> @usra_intr_u_i16(<vscale x 8 x i1> %pg, <vscale x 8 x
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    usra z0.h, z1.h, #2
 ; CHECK-NEXT:    ret
-  %ins = insertelement <vscale x 8 x i16> poison, i16 2, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %ins, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %shift = call <vscale x 8 x i16> @llvm.aarch64.sve.lsr.u.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %b, <vscale x 8 x i16> %splat)
+  %shift = call <vscale x 8 x i16> @llvm.aarch64.sve.lsr.u.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %b, <vscale x 8 x i16> splat(i16 2))
   %add = add <vscale x 8 x i16> %a, %shift
   ret <vscale x 8 x i16> %add
 }
@@ -134,9 +114,7 @@ define <vscale x 4 x i32> @usra_intr_u_i32(<vscale x 4 x i1> %pg, <vscale x 4 x
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    usra z0.s, z1.s, #3
 ; CHECK-NEXT:    ret
-  %ins = insertelement <vscale x 4 x i32> poison, i32 3, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %ins, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %shift = call <vscale x 4 x i32> @llvm.aarch64.sve.lsr.u.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %b, <vscale x 4 x i32> %splat)
+  %shift = call <vscale x 4 x i32> @llvm.aarch64.sve.lsr.u.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %b, <vscale x 4 x i32> splat(i32 3))
   %add = add <vscale x 4 x i32> %a, %shift
   ret <vscale x 4 x i32> %add
 }
@@ -146,9 +124,7 @@ define <vscale x 2 x i64> @usra_intr_u_i64(<vscale x 2 x i1> %pg, <vscale x 2 x
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    usra z0.d, z1.d, #4
 ; CHECK-NEXT:    ret
-  %ins = insertelement <vscale x 2 x i64> poison, i64 4, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %shift = call <vscale x 2 x i64> @llvm.aarch64.sve.lsr.u.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %b, <vscale x 2 x i64> %splat)
+  %shift = call <vscale x 2 x i64> @llvm.aarch64.sve.lsr.u.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %b, <vscale x 2 x i64> splat(i64 4))
   %add = add <vscale x 2 x i64> %a, %shift
   ret <vscale x 2 x i64> %add
 }
@@ -160,9 +136,7 @@ define <vscale x 16 x i8> @ssra_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ssra z0.b, z1.b, #1
 ; CHECK-NEXT:    ret
-  %ins = insertelement <vscale x 16 x i8> poison, i8 1, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %ins, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %shift = ashr <vscale x 16 x i8> %b, %splat
+  %shift = ashr <vscale x 16 x i8> %b, splat(i8 1)
   %add = add <vscale x 16 x i8> %a, %shift
   ret <vscale x 16 x i8> %add
 }
@@ -172,9 +146,7 @@ define <vscale x 8 x i16> @ssra_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ssra z0.h, z1.h, #2
 ; CHECK-NEXT:    ret
-  %ins = insertelement <vscale x 8 x i16> poison, i16 2, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %ins, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %shift = ashr <vscale x 8 x i16> %b, %splat
+  %shift = ashr <vscale x 8 x i16> %b, splat(i16 2)
   %add = add <vscale x 8 x i16> %a, %shift
   ret <vscale x 8 x i16> %add
 }
@@ -184,9 +156,7 @@ define <vscale x 4 x i32> @ssra_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ssra z0.s, z1.s, #3
 ; CHECK-NEXT:    ret
-  %ins = insertelement <vscale x 4 x i32> poison, i32 3, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %ins, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %shift = ashr <vscale x 4 x i32> %b, %splat
+  %shift = ashr <vscale x 4 x i32> %b, splat(i32 3)
   %add = add <vscale x 4 x i32> %a, %shift
   ret <vscale x 4 x i32> %add
 }
@@ -196,9 +166,7 @@ define <vscale x 2 x i64> @ssra_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ssra z0.d, z1.d, #4
 ; CHECK-NEXT:    ret
-  %ins = insertelement <vscale x 2 x i64> poison, i64 4, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %shift = ashr <vscale x 2 x i64> %b, %splat
+  %shift = ashr <vscale x 2 x i64> %b, splat(i64 4)
   %add = add <vscale x 2 x i64> %a, %shift
   ret <vscale x 2 x i64> %add
 }
@@ -209,9 +177,7 @@ define <vscale x 16 x i8> @ssra_intr_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8
 ; CHECK-NEXT:    ssra z0.b, z1.b, #1
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
-  %ins = insertelement <vscale x 16 x i8> poison, i8 1, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %ins, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %shift = call <vscale x 16 x i8> @llvm.aarch64.sve.asr.u.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %b, <vscale x 16 x i8> %splat)
+  %shift = call <vscale x 16 x i8> @llvm.aarch64.sve.asr.u.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %b, <vscale x 16 x i8> splat(i8 1))
   %add = add <vscale x 16 x i8> %a, %shift
   ret <vscale x 16 x i8> %add
 }
@@ -222,9 +188,7 @@ define <vscale x 8 x i16> @ssra_intr_i16(<vscale x 8 x i16> %a, <vscale x 8 x i1
 ; CHECK-NEXT:    ssra z0.h, z1.h, #2
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
-  %ins = insertelement <vscale x 8 x i16> poison, i16 2, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %ins, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %shift = call <vscale x 8 x i16> @llvm.aarch64.sve.asr.u.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %b, <vscale x 8 x i16> %splat)
+  %shift = call <vscale x 8 x i16> @llvm.aarch64.sve.asr.u.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %b, <vscale x 8 x i16> splat(i16 2))
   %add = add <vscale x 8 x i16> %a, %shift
   ret <vscale x 8 x i16> %add
 }
@@ -235,9 +199,7 @@ define <vscale x 4 x i32> @ssra_intr_i32(<vscale x 4 x i32> %a, <vscale x 4 x i3
 ; CHECK-NEXT:    ssra z0.s, z1.s, #3
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
-  %ins = insertelement <vscale x 4 x i32> poison, i32 3, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %ins, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %shift = call <vscale x 4 x i32> @llvm.aarch64.sve.asr.u.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %b, <vscale x 4 x i32> %splat)
+  %shift = call <vscale x 4 x i32> @llvm.aarch64.sve.asr.u.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %b, <vscale x 4 x i32> splat(i32 3))
   %add = add <vscale x 4 x i32> %a, %shift
   ret <vscale x 4 x i32> %add
 }
@@ -248,9 +210,7 @@ define <vscale x 2 x i64> @ssra_intr_i64(<vscale x 2 x i64> %a, <vscale x 2 x i6
 ; CHECK-NEXT:    ssra z0.d, z1.d, #4
 ; CHECK-NEXT:    ret
   %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
-  %ins = insertelement <vscale x 2 x i64> poison, i64 4, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %shift = call <vscale x 2 x i64> @llvm.aarch64.sve.asr.u.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %b, <vscale x 2 x i64> %splat)
+  %shift = call <vscale x 2 x i64> @llvm.aarch64.sve.asr.u.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %b, <vscale x 2 x i64> splat(i64 4))
   %add = add <vscale x 2 x i64> %a, %shift
   ret <vscale x 2 x i64> %add
 }
@@ -260,9 +220,7 @@ define <vscale x 16 x i8> @ssra_intr_u_i8(<vscale x 16 x i1> %pg, <vscale x 16 x
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ssra z0.b, z1.b, #1
 ; CHECK-NEXT:    ret
-  %ins = insertelement <vscale x 16 x i8> poison, i8 1, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %ins, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %shift = call <vscale x 16 x i8> @llvm.aarch64.sve.asr.u.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %b, <vscale x 16 x i8> %splat)
+  %shift = call <vscale x 16 x i8> @llvm.aarch64.sve.asr.u.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %b, <vscale x 16 x i8> splat(i8 1))
   %add = add <vscale x 16 x i8> %a, %shift
   ret <vscale x 16 x i8> %add
 }
@@ -272,9 +230,7 @@ define <vscale x 8 x i16> @ssra_intr_u_i16(<vscale x 8 x i1> %pg, <vscale x 8 x
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ssra z0.h, z1.h, #2
 ; CHECK-NEXT:    ret
-  %ins = insertelement <vscale x 8 x i16> poison, i16 2, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %ins, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %shift = call <vscale x 8 x i16> @llvm.aarch64.sve.asr.u.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %b, <vscale x 8 x i16> %splat)
+  %shift = call <vscale x 8 x i16> @llvm.aarch64.sve.asr.u.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %b, <vscale x 8 x i16> splat(i16 2))
   %add = add <vscale x 8 x i16> %a, %shift
   ret <vscale x 8 x i16> %add
 }
@@ -284,9 +240,7 @@ define <vscale x 4 x i32> @ssra_intr_u_i32(<vscale x 4 x i1> %pg, <vscale x 4 x
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ssra z0.s, z1.s, #3
 ; CHECK-NEXT:    ret
-  %ins = insertelement <vscale x 4 x i32> poison, i32 3, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %ins, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %shift = call <vscale x 4 x i32> @llvm.aarch64.sve.asr.u.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %b, <vscale x 4 x i32> %splat)
+  %shift = call <vscale x 4 x i32> @llvm.aarch64.sve.asr.u.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %b, <vscale x 4 x i32> splat(i32 3))
   %add = add <vscale x 4 x i32> %a, %shift
   ret <vscale x 4 x i32> %add
 }
@@ -296,9 +250,7 @@ define <vscale x 2 x i64> @ssra_intr_u_i64(<vscale x 2 x i1> %pg, <vscale x 2 x
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ssra z0.d, z1.d, #4
 ; CHECK-NEXT:    ret
-  %ins = insertelement <vscale x 2 x i64> poison, i64 4, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %shift = call <vscale x 2 x i64> @llvm.aarch64.sve.asr.u.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %b, <vscale x 2 x i64> %splat)
+  %shift = call <vscale x 2 x i64> @llvm.aarch64.sve.asr.u.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %b, <vscale x 2 x i64> splat(i64 4))
   %add = add <vscale x 2 x i64> %a, %shift
   ret <vscale x 2 x i64> %add
 }


        


More information about the llvm-commits mailing list