[llvm] 830e08b - [AArch64][SVE] Replace integer immediate intrinsics with splat vector variant

Danilo Carvalho Grael via llvm-commits llvm-commits at lists.llvm.org
Wed Dec 18 10:18:24 PST 2019


Author: Danilo Carvalho Grael
Date: 2019-12-18T13:11:21-05:00
New Revision: 830e08b98bcb427136443093c282b25328137cf0

URL: https://github.com/llvm/llvm-project/commit/830e08b98bcb427136443093c282b25328137cf0
DIFF: https://github.com/llvm/llvm-project/commit/830e08b98bcb427136443093c282b25328137cf0.diff

LOG: [AArch64][SVE] Replace integer immediate intrinsics with splat vector variant

Summary: Replace the integer immediate intrinsics with splat vector variants so they can be applied as optimizations for the C/C++ intrinsics (see the example below).

Reviewers: sdesmalen, huntergr, rengolin, efriedma, c-rhodes, mgudim, kmclaughlin

Subscribers: tschuett, kristof.beyls, hiraditya, rkruppe, psnobl, llvm-commits, amehsan

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D71614
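
For illustration (adapted from the updated tests below), the removed form took
the immediate as a scalar operand:

  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.sqadd.imm.nxv16i8(<vscale x 16 x i8> %a, i32 30)

whereas the replacement splats the immediate and uses the generic saturating-add
intrinsic, which the new patterns still select to "sqadd z0.b, z0.b, #30":

  ; splat the immediate across the scalable vector
  %elt = insertelement <vscale x 16 x i8> undef, i8 30, i32 0
  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
  ; generic saturating add, matched by the SQADD_ZI patterns
  %res = call <vscale x 16 x i8> @llvm.sadd.sat.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %splat)

SUBR has no generic IR equivalent, so the new SVE_1_Op_Imm_OptLsl_Reverse_Pat
class matches a plain sub with the splat as the first operand:

  ; sub (splat 30), %a  ==>  subr z0.b, z0.b, #30
  %res = sub <vscale x 16 x i8> %splat, %a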

Added: 
    

Modified: 
    llvm/include/llvm/IR/IntrinsicsAArch64.td
    llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
    llvm/lib/Target/AArch64/SVEInstrFormats.td
    llvm/test/CodeGen/AArch64/sve-int-imm.ll
    llvm/test/CodeGen/AArch64/sve-int-log-imm.ll

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td b/llvm/include/llvm/IR/IntrinsicsAArch64.td
index a3827aa33427..403a4058ae8b 100644
--- a/llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -1069,12 +1069,6 @@ class AdvSIMD_GatherLoad_VecTorBase_Intrinsic
                 ],
                 [IntrReadMem, IntrArgMemOnly]>;
 
-class AdvSIMD_1VectorArg_Imm_Intrinsic
-    : Intrinsic<[llvm_anyvector_ty],
-                [LLVMMatchType<0>,
-                 llvm_i32_ty],
-                [IntrNoMem, ImmArg<1>]>;
-
 class AdvSIMD_ScatterStore_64bitOffset_Intrinsic
     : Intrinsic<[],
                [
@@ -1104,12 +1098,6 @@ class AdvSIMD_ScatterStore_VectorBase_Intrinsic
                ],
                [IntrWriteMem, IntrArgMemOnly, ImmArg<3>]>;
 
-class AdvSIMD_1VectorArg_Imm64_Intrinsic
-    : Intrinsic<[llvm_anyvector_ty],
-                [LLVMMatchType<0>,
-                 llvm_i64_ty],
-                [IntrNoMem, ImmArg<1>]>;
-
 //
 // Loads
 //
@@ -1130,14 +1118,6 @@ def int_aarch64_sve_add   : AdvSIMD_Pred2VectorArg_Intrinsic;
 def int_aarch64_sve_sub   : AdvSIMD_Pred2VectorArg_Intrinsic;
 def int_aarch64_sve_subr  : AdvSIMD_Pred2VectorArg_Intrinsic;
 
-def int_aarch64_sve_add_imm    : AdvSIMD_1VectorArg_Imm_Intrinsic;
-def int_aarch64_sve_sub_imm    : AdvSIMD_1VectorArg_Imm_Intrinsic;
-def int_aarch64_sve_subr_imm   : AdvSIMD_1VectorArg_Imm_Intrinsic;
-def int_aarch64_sve_sqadd_imm  : AdvSIMD_1VectorArg_Imm_Intrinsic;
-def int_aarch64_sve_uqadd_imm  : AdvSIMD_1VectorArg_Imm_Intrinsic;
-def int_aarch64_sve_sqsub_imm  : AdvSIMD_1VectorArg_Imm_Intrinsic;
-def int_aarch64_sve_uqsub_imm  : AdvSIMD_1VectorArg_Imm_Intrinsic;
-
 def int_aarch64_sve_mul        : AdvSIMD_Pred2VectorArg_Intrinsic;
 def int_aarch64_sve_smulh      : AdvSIMD_Pred2VectorArg_Intrinsic;
 def int_aarch64_sve_umulh      : AdvSIMD_Pred2VectorArg_Intrinsic;
@@ -1277,10 +1257,6 @@ def int_aarch64_sve_orns        : AdvSIMD_Pred2VectorArg_Intrinsic;
 def int_aarch64_sve_nors        : AdvSIMD_Pred2VectorArg_Intrinsic;
 def int_aarch64_sve_nands       : AdvSIMD_Pred2VectorArg_Intrinsic;
 
-def int_aarch64_sve_orr_imm  : AdvSIMD_1VectorArg_Imm64_Intrinsic;
-def int_aarch64_sve_eor_imm  : AdvSIMD_1VectorArg_Imm64_Intrinsic;
-def int_aarch64_sve_and_imm  : AdvSIMD_1VectorArg_Imm64_Intrinsic;
-
 //
 // Conversion
 //

diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index 2a282d320fe9..5352f69ff1eb 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -93,13 +93,13 @@ let Predicates = [HasSVE] in {
   defm AND_ZPmZ : sve_int_bin_pred_log<0b010, "and", int_aarch64_sve_and>;
   defm BIC_ZPmZ : sve_int_bin_pred_log<0b011, "bic", int_aarch64_sve_bic>;
 
-  defm ADD_ZI   : sve_int_arith_imm0<0b000, "add", int_aarch64_sve_add_imm>;
-  defm SUB_ZI   : sve_int_arith_imm0<0b001, "sub", int_aarch64_sve_sub_imm>;
-  defm SUBR_ZI  : sve_int_arith_imm0<0b011, "subr", int_aarch64_sve_subr_imm>;
-  defm SQADD_ZI : sve_int_arith_imm0<0b100, "sqadd", int_aarch64_sve_sqadd_imm>;
-  defm UQADD_ZI : sve_int_arith_imm0<0b101, "uqadd", int_aarch64_sve_uqadd_imm>;
-  defm SQSUB_ZI : sve_int_arith_imm0<0b110, "sqsub", int_aarch64_sve_sqsub_imm>;
-  defm UQSUB_ZI : sve_int_arith_imm0<0b111, "uqsub", int_aarch64_sve_uqsub_imm>;
+  defm ADD_ZI   : sve_int_arith_imm0<0b000, "add", add>;
+  defm SUB_ZI   : sve_int_arith_imm0<0b001, "sub", sub>;
+  defm SUBR_ZI  : sve_int_arith_imm0_subr<0b011, "subr", sub>;
+  defm SQADD_ZI : sve_int_arith_imm0<0b100, "sqadd", saddsat>;
+  defm UQADD_ZI : sve_int_arith_imm0<0b101, "uqadd", uaddsat>;
+  defm SQSUB_ZI : sve_int_arith_imm0<0b110, "sqsub", ssubsat>;
+  defm UQSUB_ZI : sve_int_arith_imm0<0b111, "uqsub", usubsat>;
 
   defm MAD_ZPmZZ : sve_int_mladdsub_vvv_pred<0b0, "mad", int_aarch64_sve_mad>;
   defm MSB_ZPmZZ : sve_int_mladdsub_vvv_pred<0b1, "msb", int_aarch64_sve_msb>;
@@ -117,9 +117,9 @@ let Predicates = [HasSVE] in {
   defm EORV_VPZ  : sve_int_reduce_2<0b001, "eorv", AArch64eorv_pred>;
   defm ANDV_VPZ  : sve_int_reduce_2<0b010, "andv", AArch64andv_pred>;
 
-  defm ORR_ZI : sve_int_log_imm<0b00, "orr", "orn", int_aarch64_sve_orr_imm>;
-  defm EOR_ZI : sve_int_log_imm<0b01, "eor", "eon", int_aarch64_sve_eor_imm>;
-  defm AND_ZI : sve_int_log_imm<0b10, "and", "bic", int_aarch64_sve_and_imm>;
+  defm ORR_ZI : sve_int_log_imm<0b00, "orr", "orn", or>;
+  defm EOR_ZI : sve_int_log_imm<0b01, "eor", "eon", xor>;
+  defm AND_ZI : sve_int_log_imm<0b10, "and", "bic", and>;
 
   defm SMAX_ZI   : sve_int_arith_imm1<0b00, "smax", simm8>;
   defm SMIN_ZI   : sve_int_arith_imm1<0b10, "smin", simm8>;

diff --git a/llvm/lib/Target/AArch64/SVEInstrFormats.td b/llvm/lib/Target/AArch64/SVEInstrFormats.td
index 244397cbc377..946a4b33e5e5 100644
--- a/llvm/lib/Target/AArch64/SVEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -299,14 +299,19 @@ class SVE_1_Op_Pat<ValueType vtd, SDPatternOperator op, ValueType vt1,
 : Pat<(vtd (op vt1:$Op1)),
       (inst $Op1)>;
 
+class SVE_1_Op_Imm_OptLsl_Reverse_Pat<ValueType vt, SDPatternOperator op, ZPRRegOp zprty,
+                                      ValueType it, ComplexPattern cpx, Instruction inst>
+  : Pat<(vt (op (vt (AArch64dup (it (cpx i32:$imm, i32:$shift)))), (vt zprty:$Op1))),
+        (inst $Op1, i32:$imm, i32:$shift)>;
+
 class SVE_1_Op_Imm_OptLsl_Pat<ValueType vt, SDPatternOperator op, ZPRRegOp zprty,
-                              ComplexPattern cpx, Instruction inst>
-  : Pat<(vt (op (vt zprty:$Op1), (i32 (cpx i32:$imm, i32:$shift)))),
+                              ValueType it, ComplexPattern cpx, Instruction inst>
+  : Pat<(vt (op (vt zprty:$Op1), (vt (AArch64dup (it (cpx i32:$imm, i32:$shift)))))),
         (inst $Op1, i32:$imm, i32:$shift)>;
 
 class SVE_1_Op_Imm_Log_Pat<ValueType vt, SDPatternOperator op, ZPRRegOp zprty,
-                           ComplexPattern cpx, Instruction inst>
-  : Pat<(vt (op (vt zprty:$Op1), (i64 (cpx i64:$imm)))),
+                           ValueType it, ComplexPattern cpx, Instruction inst>
+  : Pat<(vt (op (vt zprty:$Op1), (vt (AArch64dup (it (cpx i64:$imm)))))),
         (inst $Op1, i64:$imm)>;
 
 class SVE_2_Op_Pat<ValueType vtd, SDPatternOperator op, ValueType vt1,
@@ -1143,10 +1148,10 @@ class sve_int_log_imm<bits<2> opc, string asm>
 multiclass sve_int_log_imm<bits<2> opc, string asm, string alias, SDPatternOperator op> {
   def NAME : sve_int_log_imm<opc, asm>;
 
-  def : SVE_1_Op_Imm_Log_Pat<nxv16i8, op, ZPR8,  SVELogicalImm8Pat,  !cast<Instruction>(NAME)>;
-  def : SVE_1_Op_Imm_Log_Pat<nxv8i16, op, ZPR16, SVELogicalImm16Pat, !cast<Instruction>(NAME)>;
-  def : SVE_1_Op_Imm_Log_Pat<nxv4i32, op, ZPR32, SVELogicalImm32Pat, !cast<Instruction>(NAME)>;
-  def : SVE_1_Op_Imm_Log_Pat<nxv2i64, op, ZPR64, SVELogicalImm64Pat, !cast<Instruction>(NAME)>;
+  def : SVE_1_Op_Imm_Log_Pat<nxv16i8, op, ZPR8,  i32, SVELogicalImm8Pat,  !cast<Instruction>(NAME)>;
+  def : SVE_1_Op_Imm_Log_Pat<nxv8i16, op, ZPR16, i32, SVELogicalImm16Pat, !cast<Instruction>(NAME)>;
+  def : SVE_1_Op_Imm_Log_Pat<nxv4i32, op, ZPR32, i32, SVELogicalImm32Pat, !cast<Instruction>(NAME)>;
+  def : SVE_1_Op_Imm_Log_Pat<nxv2i64, op, ZPR64, i64, SVELogicalImm64Pat, !cast<Instruction>(NAME)>;
 
   def : InstAlias<asm # "\t$Zdn, $Zdn, $imm",
                   (!cast<Instruction>(NAME) ZPR8:$Zdn, sve_logical_imm8:$imm), 4>;
@@ -3320,10 +3325,22 @@ multiclass sve_int_arith_imm0<bits<3> opc, string asm, SDPatternOperator op> {
   def _S : sve_int_arith_imm0<0b10, opc, asm, ZPR32, addsub_imm8_opt_lsl_i32>;
   def _D : sve_int_arith_imm0<0b11, opc, asm, ZPR64, addsub_imm8_opt_lsl_i64>;
 
-  def : SVE_1_Op_Imm_OptLsl_Pat<nxv16i8, op, ZPR8,  SVEAddSubImm8Pat,  !cast<Instruction>(NAME # _B)>;
-  def : SVE_1_Op_Imm_OptLsl_Pat<nxv8i16, op, ZPR16, SVEAddSubImm16Pat, !cast<Instruction>(NAME # _H)>;
-  def : SVE_1_Op_Imm_OptLsl_Pat<nxv4i32, op, ZPR32, SVEAddSubImm32Pat, !cast<Instruction>(NAME # _S)>;
-  def : SVE_1_Op_Imm_OptLsl_Pat<nxv2i64, op, ZPR64, SVEAddSubImm64Pat, !cast<Instruction>(NAME # _D)>;
+  def : SVE_1_Op_Imm_OptLsl_Pat<nxv16i8, op, ZPR8,  i32, SVEAddSubImm8Pat,  !cast<Instruction>(NAME # _B)>;
+  def : SVE_1_Op_Imm_OptLsl_Pat<nxv8i16, op, ZPR16, i32, SVEAddSubImm16Pat, !cast<Instruction>(NAME # _H)>;
+  def : SVE_1_Op_Imm_OptLsl_Pat<nxv4i32, op, ZPR32, i32, SVEAddSubImm32Pat, !cast<Instruction>(NAME # _S)>;
+  def : SVE_1_Op_Imm_OptLsl_Pat<nxv2i64, op, ZPR64, i64, SVEAddSubImm64Pat, !cast<Instruction>(NAME # _D)>;
+}
+
+multiclass sve_int_arith_imm0_subr<bits<3> opc, string asm, SDPatternOperator op> {
+  def _B : sve_int_arith_imm0<0b00, opc, asm, ZPR8,  addsub_imm8_opt_lsl_i8>;
+  def _H : sve_int_arith_imm0<0b01, opc, asm, ZPR16, addsub_imm8_opt_lsl_i16>;
+  def _S : sve_int_arith_imm0<0b10, opc, asm, ZPR32, addsub_imm8_opt_lsl_i32>;
+  def _D : sve_int_arith_imm0<0b11, opc, asm, ZPR64, addsub_imm8_opt_lsl_i64>;
+
+  def : SVE_1_Op_Imm_OptLsl_Reverse_Pat<nxv16i8, op, ZPR8,  i32, SVEAddSubImm8Pat,  !cast<Instruction>(NAME # _B)>;
+  def : SVE_1_Op_Imm_OptLsl_Reverse_Pat<nxv8i16, op, ZPR16, i32, SVEAddSubImm16Pat, !cast<Instruction>(NAME # _H)>;
+  def : SVE_1_Op_Imm_OptLsl_Reverse_Pat<nxv4i32, op, ZPR32, i32, SVEAddSubImm32Pat, !cast<Instruction>(NAME # _S)>;
+  def : SVE_1_Op_Imm_OptLsl_Reverse_Pat<nxv2i64, op, ZPR64, i64, SVEAddSubImm64Pat, !cast<Instruction>(NAME # _D)>;
 }
 
 class sve_int_arith_imm<bits<2> sz8_64, bits<6> opc, string asm,

diff --git a/llvm/test/CodeGen/AArch64/sve-int-imm.ll b/llvm/test/CodeGen/AArch64/sve-int-imm.ll
index 30002771733e..57d9540c2a02 100644
--- a/llvm/test/CodeGen/AArch64/sve-int-imm.ll
+++ b/llvm/test/CodeGen/AArch64/sve-int-imm.ll
@@ -1,471 +1,519 @@
 ; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
 
-define <vscale x 16 x i8> @add_imm_i8_low(<vscale x 16 x i8> %a) {
-; CHECK-LABEL: add_imm_i8_low
+;
+; SVE Arith Vector Immediate Unpredicated CodeGen
+;
+
+; ADD
+define <vscale x 16 x i8> @add_i8_low(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: add_i8_low
 ; CHECK: add  z0.b, z0.b, #30
 ; CHECK-NEXT: ret
-  %res =  call <vscale x 16 x i8> @llvm.aarch64.sve.add.imm.nxv16i8(<vscale x 16 x i8> %a,
-                                                                    i32 30)
+  %elt = insertelement <vscale x 16 x i8> undef, i8 30, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %res =  add <vscale x 16 x i8> %a, %splat
   ret <vscale x 16 x i8> %res
 }
 
-define <vscale x 8 x i16> @add_imm_i16_low(<vscale x 8 x i16> %a) {
-; CHECK-LABEL: add_imm_i16_low
+define <vscale x 8 x i16> @add_i16_low(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: add_i16_low
 ; CHECK: add  z0.h, z0.h, #30
 ; CHECK-NEXT: ret
-  %res =  call <vscale x 8 x i16> @llvm.aarch64.sve.add.imm.nxv8i16(<vscale x 8 x i16> %a,
-                                                                    i32 30)
+  %elt = insertelement <vscale x 8 x i16> undef, i16 30, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %res =  add <vscale x 8 x i16> %a, %splat
   ret <vscale x 8 x i16> %res
 }
 
-define <vscale x 8 x i16> @add_imm_i16_high(<vscale x 8 x i16> %a) {
-; CHECK-LABEL: add_imm_i16_high
+define <vscale x 8 x i16> @add_i16_high(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: add_i16_high
 ; CHECK: add  z0.h, z0.h, #1024
 ; CHECK-NEXT: ret
-  %res =  call <vscale x 8 x i16> @llvm.aarch64.sve.add.imm.nxv8i16(<vscale x 8 x i16> %a,
-                                                                    i32 1024)
+  %elt = insertelement <vscale x 8 x i16> undef, i16 1024, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %res =  add <vscale x 8 x i16> %a, %splat
   ret <vscale x 8 x i16> %res
 }
 
-define <vscale x 4 x i32> @add_imm_i32_low(<vscale x 4 x i32> %a) {
-; CHECK-LABEL: add_imm_i32_low
+define <vscale x 4 x i32> @add_i32_low(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: add_i32_low
 ; CHECK: add  z0.s, z0.s, #30
 ; CHECK-NEXT: ret
-  %res =  call <vscale x 4 x i32> @llvm.aarch64.sve.add.imm.nxv4i32(<vscale x 4 x i32> %a,
-                                                                    i32 30)
+  %elt = insertelement <vscale x 4 x i32> undef, i32 30, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %res = add <vscale x 4 x i32> %a, %splat
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 4 x i32> @add_imm_i32_high(<vscale x 4 x i32> %a) {
-; CHECK-LABEL: add_imm_i32_high
+define <vscale x 4 x i32> @add_i32_high(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: add_i32_high
 ; CHECK: add  z0.s, z0.s, #1024
 ; CHECK-NEXT: ret
-  %res =  call <vscale x 4 x i32> @llvm.aarch64.sve.add.imm.nxv4i32(<vscale x 4 x i32> %a,
-                                                                    i32 1024)
+  %elt = insertelement <vscale x 4 x i32> undef, i32 1024, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %res =  add <vscale x 4 x i32> %a, %splat
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 2 x i64> @add_imm_i64_low(<vscale x 2 x i64> %a) {
-; CHECK-LABEL: add_imm_i64_low
+define <vscale x 2 x i64> @add_i64_low(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: add_i64_low
 ; CHECK: add  z0.d, z0.d, #30
 ; CHECK-NEXT: ret
-  %res =  call <vscale x 2 x i64> @llvm.aarch64.sve.add.imm.nxv2i64(<vscale x 2 x i64> %a,
-                                                                    i32 30)
+  %elt = insertelement <vscale x 2 x i64> undef, i64 30, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %res =  add <vscale x 2 x i64> %a, %splat
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @add_imm_i64_high(<vscale x 2 x i64> %a) {
-; CHECK-LABEL: add_imm_i64_high
+define <vscale x 2 x i64> @add_i64_high(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: add_i64_high
 ; CHECK: add  z0.d, z0.d, #1024
 ; CHECK-NEXT: ret
-  %res =  call <vscale x 2 x i64> @llvm.aarch64.sve.add.imm.nxv2i64(<vscale x 2 x i64> %a,
-                                                                    i32 1024)
+  %elt = insertelement <vscale x 2 x i64> undef, i64 1024, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %res = add <vscale x 2 x i64> %a, %splat
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 16 x i8> @sub_imm_i8_low(<vscale x 16 x i8> %a) {
-; CHECK-LABEL: sub_imm_i8_low
-; CHECK: sub  z0.b, z0.b, #30
+; SUBR
+define <vscale x 16 x i8> @subr_i8_low(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: subr_i8_low
+; CHECK: subr  z0.b, z0.b, #30
 ; CHECK-NEXT: ret
-  %res =  call <vscale x 16 x i8> @llvm.aarch64.sve.sub.imm.nxv16i8(<vscale x 16 x i8> %a,
-                                                                    i32 30)
+  %elt = insertelement <vscale x 16 x i8> undef, i8 30, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %res =  sub <vscale x 16 x i8> %splat, %a
   ret <vscale x 16 x i8> %res
 }
 
-define <vscale x 8 x i16> @sub_imm_i16_low(<vscale x 8 x i16> %a) {
-; CHECK-LABEL: sub_imm_i16_low
-; CHECK: sub  z0.h, z0.h, #30
+define <vscale x 8 x i16> @subr_i16_low(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: subr_i16_low
+; CHECK: subr  z0.h, z0.h, #30
 ; CHECK-NEXT: ret
-  %res =  call <vscale x 8 x i16> @llvm.aarch64.sve.sub.imm.nxv8i16(<vscale x 8 x i16> %a,
-                                                                    i32 30)
+  %elt = insertelement <vscale x 8 x i16> undef, i16 30, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %res =  sub <vscale x 8 x i16> %splat, %a
   ret <vscale x 8 x i16> %res
 }
 
-define <vscale x 8 x i16> @sub_imm_i16_high(<vscale x 8 x i16> %a) {
-; CHECK-LABEL: sub_imm_i16_high
-; CHECK: sub  z0.h, z0.h, #1024
+define <vscale x 8 x i16> @subr_i16_high(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: subr_i16_high
+; CHECK: subr  z0.h, z0.h, #1024
 ; CHECK-NEXT: ret
-  %res =  call <vscale x 8 x i16> @llvm.aarch64.sve.sub.imm.nxv8i16(<vscale x 8 x i16> %a,
-                                                                    i32 1024)
+  %elt = insertelement <vscale x 8 x i16> undef, i16 1024, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %res =  sub <vscale x 8 x i16> %splat, %a
   ret <vscale x 8 x i16> %res
 }
 
-define <vscale x 4 x i32> @sub_imm_i32_low(<vscale x 4 x i32> %a) {
-; CHECK-LABEL: sub_imm_i32_low
-; CHECK: sub  z0.s, z0.s, #30
+define <vscale x 4 x i32> @subr_i32_low(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: subr_i32_low
+; CHECK: subr  z0.s, z0.s, #30
 ; CHECK-NEXT: ret
-  %res =  call <vscale x 4 x i32> @llvm.aarch64.sve.sub.imm.nxv4i32(<vscale x 4 x i32> %a,
-                                                                    i32 30)
+  %elt = insertelement <vscale x 4 x i32> undef, i32 30, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %res =  sub <vscale x 4 x i32> %splat, %a
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 4 x i32> @sub_imm_i32_high(<vscale x 4 x i32> %a) {
-; CHECK-LABEL: sub_imm_i32_high
-; CHECK: sub  z0.s, z0.s, #1024
+define <vscale x 4 x i32> @subr_i32_high(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: subr_i32_high
+; CHECK: subr  z0.s, z0.s, #1024
 ; CHECK-NEXT: ret
-  %res =  call <vscale x 4 x i32> @llvm.aarch64.sve.sub.imm.nxv4i32(<vscale x 4 x i32> %a,
-                                                                    i32 1024)
+  %elt = insertelement <vscale x 4 x i32> undef, i32 1024, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %res =  sub <vscale x 4 x i32> %splat, %a
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 2 x i64> @sub_imm_i64_low(<vscale x 2 x i64> %a) {
-; CHECK-LABEL: sub_imm_i64_low
-; CHECK: sub  z0.d, z0.d, #30
+define <vscale x 2 x i64> @subr_i64_low(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: subr_i64_low
+; CHECK: subr  z0.d, z0.d, #30
 ; CHECK-NEXT: ret
-  %res =  call <vscale x 2 x i64> @llvm.aarch64.sve.sub.imm.nxv2i64(<vscale x 2 x i64> %a,
-                                                                    i32 30)
+  %elt = insertelement <vscale x 2 x i64> undef, i64 30, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %res =  sub <vscale x 2 x i64> %splat, %a
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @sub_imm_i64_high(<vscale x 2 x i64> %a) {
-; CHECK-LABEL: sub_imm_i64_high
-; CHECK: sub  z0.d, z0.d, #1024
+define <vscale x 2 x i64> @subr_i64_high(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: subr_i64_high
+; CHECK: subr  z0.d, z0.d, #1024
 ; CHECK-NEXT: ret
-  %res =  call <vscale x 2 x i64> @llvm.aarch64.sve.sub.imm.nxv2i64(<vscale x 2 x i64> %a,
-                                                                    i32 1024)
+  %elt = insertelement <vscale x 2 x i64> undef, i64 1024, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %res =  sub <vscale x 2 x i64> %splat, %a
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 16 x i8> @subr_imm_i8_low(<vscale x 16 x i8> %a) {
-; CHECK-LABEL: subr_imm_i8_low
-; CHECK: subr  z0.b, z0.b, #30
+; SUB
+define <vscale x 16 x i8> @sub_i8_low(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: sub_i8_low
+; CHECK: sub  z0.b, z0.b, #30
 ; CHECK-NEXT: ret
-  %res =  call <vscale x 16 x i8> @llvm.aarch64.sve.subr.imm.nxv16i8(<vscale x 16 x i8> %a,
-                                                                     i32 30)
+  %elt = insertelement <vscale x 16 x i8> undef, i8 30, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %res =  sub <vscale x 16 x i8> %a, %splat
   ret <vscale x 16 x i8> %res
 }
 
-define <vscale x 8 x i16> @subr_imm_i16_low(<vscale x 8 x i16> %a) {
-; CHECK-LABEL: subr_imm_i16_low
-; CHECK: subr  z0.h, z0.h, #30
+define <vscale x 8 x i16> @sub_i16_low(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: sub_i16_low
+; CHECK: sub  z0.h, z0.h, #30
 ; CHECK-NEXT: ret
-  %res =  call <vscale x 8 x i16> @llvm.aarch64.sve.subr.imm.nxv8i16(<vscale x 8 x i16> %a,
-                                                                     i32 30)
+  %elt = insertelement <vscale x 8 x i16> undef, i16 30, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %res =  sub <vscale x 8 x i16> %a, %splat
   ret <vscale x 8 x i16> %res
 }
 
-define <vscale x 8 x i16> @subr_imm_i16_high(<vscale x 8 x i16> %a) {
-; CHECK-LABEL: subr_imm_i16_high
-; CHECK: subr  z0.h, z0.h, #1024
+define <vscale x 8 x i16> @sub_i16_high(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: sub_i16_high
+; CHECK: sub  z0.h, z0.h, #1024
 ; CHECK-NEXT: ret
-  %res =  call <vscale x 8 x i16> @llvm.aarch64.sve.subr.imm.nxv8i16(<vscale x 8 x i16> %a,
-                                                                     i32 1024)
+  %elt = insertelement <vscale x 8 x i16> undef, i16 1024, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %res =  sub <vscale x 8 x i16> %a, %splat
   ret <vscale x 8 x i16> %res
 }
 
-define <vscale x 4 x i32> @subr_imm_i32_low(<vscale x 4 x i32> %a) {
-; CHECK-LABEL: subr_imm_i32_low
-; CHECK: subr  z0.s, z0.s, #30
+define <vscale x 4 x i32> @sub_i32_low(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: sub_i32_low
+; CHECK: sub  z0.s, z0.s, #30
 ; CHECK-NEXT: ret
-  %res =  call <vscale x 4 x i32> @llvm.aarch64.sve.subr.imm.nxv4i32(<vscale x 4 x i32> %a,
-                                                                     i32 30)
+  %elt = insertelement <vscale x 4 x i32> undef, i32 30, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %res = sub <vscale x 4 x i32> %a, %splat
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 4 x i32> @subr_imm_i32_high(<vscale x 4 x i32> %a) {
-; CHECK-LABEL: subr_imm_i32_high
-; CHECK: subr  z0.s, z0.s, #1024
+define <vscale x 4 x i32> @sub_i32_high(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: sub_i32_high
+; CHECK: sub  z0.s, z0.s, #1024
 ; CHECK-NEXT: ret
-  %res =  call <vscale x 4 x i32> @llvm.aarch64.sve.subr.imm.nxv4i32(<vscale x 4 x i32> %a,
-                                                                     i32 1024)
+  %elt = insertelement <vscale x 4 x i32> undef, i32 1024, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %res =  sub <vscale x 4 x i32> %a, %splat
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 2 x i64> @subr_imm_i64_low(<vscale x 2 x i64> %a) {
-; CHECK-LABEL: subr_imm_i64_low
-; CHECK: subr  z0.d, z0.d, #30
+define <vscale x 2 x i64> @sub_i64_low(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: sub_i64_low
+; CHECK: sub  z0.d, z0.d, #30
 ; CHECK-NEXT: ret
-  %res =  call <vscale x 2 x i64> @llvm.aarch64.sve.subr.imm.nxv2i64(<vscale x 2 x i64> %a,
-                                                                     i32 30)
+  %elt = insertelement <vscale x 2 x i64> undef, i64 30, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %res =  sub <vscale x 2 x i64> %a, %splat
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @subr_imm_i64_high(<vscale x 2 x i64> %a) {
-; CHECK-LABEL: subr_imm_i64_high
-; CHECK: subr  z0.d, z0.d, #1024
+define <vscale x 2 x i64> @sub_i64_high(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: sub_i64_high
+; CHECK: sub  z0.d, z0.d, #1024
 ; CHECK-NEXT: ret
-  %res =  call <vscale x 2 x i64> @llvm.aarch64.sve.subr.imm.nxv2i64(<vscale x 2 x i64> %a,
-                                                                     i32 1024)
+  %elt = insertelement <vscale x 2 x i64> undef, i64 1024, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %res = sub <vscale x 2 x i64> %a, %splat
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 16 x i8> @sqadd_imm_i8_low(<vscale x 16 x i8> %a) {
-; CHECK-LABEL: sqadd_imm_i8_low
+; SQADD
+define <vscale x 16 x i8> @sqadd_i8_low(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: sqadd_i8_low
 ; CHECK: sqadd  z0.b, z0.b, #30
 ; CHECK-NEXT: ret
-  %res =  call <vscale x 16 x i8> @llvm.aarch64.sve.sqadd.imm.nxv16i8(<vscale x 16 x i8> %a,
-                                                                      i32 30)
+  %elt = insertelement <vscale x 16 x i8> undef, i8 30, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %res =  call <vscale x 16 x i8> @llvm.sadd.sat.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %splat)
   ret <vscale x 16 x i8> %res
 }
 
-define <vscale x 8 x i16> @sqadd_imm_i16_low(<vscale x 8 x i16> %a) {
-; CHECK-LABEL: sqadd_imm_i16_low
+define <vscale x 8 x i16> @sqadd_i16_low(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: sqadd_i16_low
 ; CHECK: sqadd  z0.h, z0.h, #30
 ; CHECK-NEXT: ret
-  %res =  call <vscale x 8 x i16> @llvm.aarch64.sve.sqadd.imm.nxv8i16(<vscale x 8 x i16> %a,
-                                                                      i32 30)
+  %elt = insertelement <vscale x 8 x i16> undef, i16 30, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %res =  call <vscale x 8 x i16> @llvm.sadd.sat.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
   ret <vscale x 8 x i16> %res
 }
 
-define <vscale x 8 x i16> @sqadd_imm_i16_high(<vscale x 8 x i16> %a) {
-; CHECK-LABEL: sqadd_imm_i16_high
+define <vscale x 8 x i16> @sqadd_i16_high(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: sqadd_i16_high
 ; CHECK: sqadd  z0.h, z0.h, #1024
 ; CHECK-NEXT: ret
-  %res =  call <vscale x 8 x i16> @llvm.aarch64.sve.sqadd.imm.nxv8i16(<vscale x 8 x i16> %a,
-                                                                      i32 1024)
+  %elt = insertelement <vscale x 8 x i16> undef, i16 1024, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %res =  call <vscale x 8 x i16> @llvm.sadd.sat.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
   ret <vscale x 8 x i16> %res
 }
 
-define <vscale x 4 x i32> @sqadd_imm_i32_low(<vscale x 4 x i32> %a) {
-; CHECK-LABEL: sqadd_imm_i32_low
+define <vscale x 4 x i32> @sqadd_i32_low(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: sqadd_i32_low
 ; CHECK: sqadd  z0.s, z0.s, #30
 ; CHECK-NEXT: ret
-  %res =  call <vscale x 4 x i32> @llvm.aarch64.sve.sqadd.imm.nxv4i32(<vscale x 4 x i32> %a,
-                                                                      i32 30)
+  %elt = insertelement <vscale x 4 x i32> undef, i32 30, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %res =  call <vscale x 4 x i32> @llvm.sadd.sat.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 4 x i32> @sqadd_imm_i32_high(<vscale x 4 x i32> %a) {
-; CHECK-LABEL: sqadd_imm_i32_high
+define <vscale x 4 x i32> @sqadd_i32_high(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: sqadd_i32_high
 ; CHECK: sqadd  z0.s, z0.s, #1024
 ; CHECK-NEXT: ret
-  %res =  call <vscale x 4 x i32> @llvm.aarch64.sve.sqadd.imm.nxv4i32(<vscale x 4 x i32> %a,
-                                                                      i32 1024)
+  %elt = insertelement <vscale x 4 x i32> undef, i32 1024, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %res =  call <vscale x 4 x i32> @llvm.sadd.sat.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 2 x i64> @sqadd_imm_i64_low(<vscale x 2 x i64> %a) {
-; CHECK-LABEL: sqadd_imm_i64_low
+define <vscale x 2 x i64> @sqadd_i64_low(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: sqadd_i64_low
 ; CHECK: sqadd  z0.d, z0.d, #30
 ; CHECK-NEXT: ret
-  %res =  call <vscale x 2 x i64> @llvm.aarch64.sve.sqadd.imm.nxv2i64(<vscale x 2 x i64> %a,
-                                                                      i32 30)
+  %elt = insertelement <vscale x 2 x i64> undef, i64 30, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %res =  call <vscale x 2 x i64> @llvm.sadd.sat.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @sqadd_imm_i64_high(<vscale x 2 x i64> %a) {
-; CHECK-LABEL: sqadd_imm_i64_high
+define <vscale x 2 x i64> @sqadd_i64_high(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: sqadd_i64_high
 ; CHECK: sqadd  z0.d, z0.d, #1024
 ; CHECK-NEXT: ret
-  %res =  call <vscale x 2 x i64> @llvm.aarch64.sve.sqadd.imm.nxv2i64(<vscale x 2 x i64> %a,
-                                                                      i32 1024)
+  %elt = insertelement <vscale x 2 x i64> undef, i64 1024, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %res =  call <vscale x 2 x i64> @llvm.sadd.sat.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 16 x i8> @uqadd_imm_i8_low(<vscale x 16 x i8> %a) {
-; CHECK-LABEL: uqadd_imm_i8_low
+; UQADD
+define <vscale x 16 x i8> @uqadd_i8_low(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: uqadd_i8_low
 ; CHECK: uqadd  z0.b, z0.b, #30
 ; CHECK-NEXT: ret
-  %res =  call <vscale x 16 x i8> @llvm.aarch64.sve.uqadd.imm.nxv16i8(<vscale x 16 x i8> %a,
-                                                                      i32 30)
+  %elt = insertelement <vscale x 16 x i8> undef, i8 30, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %res =  call <vscale x 16 x i8> @llvm.uadd.sat.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %splat)
   ret <vscale x 16 x i8> %res
 }
 
-define <vscale x 8 x i16> @uqadd_imm_i16_low(<vscale x 8 x i16> %a) {
-; CHECK-LABEL: uqadd_imm_i16_low
+define <vscale x 8 x i16> @uqadd_i16_low(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: uqadd_i16_low
 ; CHECK: uqadd  z0.h, z0.h, #30
 ; CHECK-NEXT: ret
-  %res =  call <vscale x 8 x i16> @llvm.aarch64.sve.uqadd.imm.nxv8i16(<vscale x 8 x i16> %a,
-                                                                      i32 30)
+  %elt = insertelement <vscale x 8 x i16> undef, i16 30, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %res =  call <vscale x 8 x i16> @llvm.uadd.sat.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
   ret <vscale x 8 x i16> %res
 }
 
-define <vscale x 8 x i16> @uqadd_imm_i16_high(<vscale x 8 x i16> %a) {
-; CHECK-LABEL: uqadd_imm_i16_high
+define <vscale x 8 x i16> @uqadd_i16_high(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: uqadd_i16_high
 ; CHECK: uqadd  z0.h, z0.h, #1024
 ; CHECK-NEXT: ret
-  %res =  call <vscale x 8 x i16> @llvm.aarch64.sve.uqadd.imm.nxv8i16(<vscale x 8 x i16> %a,
-                                                                      i32 1024)
+  %elt = insertelement <vscale x 8 x i16> undef, i16 1024, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %res =  call <vscale x 8 x i16> @llvm.uadd.sat.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
   ret <vscale x 8 x i16> %res
 }
 
-define <vscale x 4 x i32> @uqadd_imm_i32_low(<vscale x 4 x i32> %a) {
-; CHECK-LABEL: uqadd_imm_i32_low
+define <vscale x 4 x i32> @uqadd_i32_low(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: uqadd_i32_low
 ; CHECK: uqadd  z0.s, z0.s, #30
 ; CHECK-NEXT: ret
-  %res =  call <vscale x 4 x i32> @llvm.aarch64.sve.uqadd.imm.nxv4i32(<vscale x 4 x i32> %a,
-                                                                      i32 30)
+  %elt = insertelement <vscale x 4 x i32> undef, i32 30, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %res =  call <vscale x 4 x i32> @llvm.uadd.sat.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 4 x i32> @uqadd_imm_i32_high(<vscale x 4 x i32> %a) {
-; CHECK-LABEL: uqadd_imm_i32_high
+define <vscale x 4 x i32> @uqadd_i32_high(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: uqadd_i32_high
 ; CHECK: uqadd  z0.s, z0.s, #1024
 ; CHECK-NEXT: ret
-  %res =  call <vscale x 4 x i32> @llvm.aarch64.sve.uqadd.imm.nxv4i32(<vscale x 4 x i32> %a,
-                                                                      i32 1024)
+  %elt = insertelement <vscale x 4 x i32> undef, i32 1024, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %res =  call <vscale x 4 x i32> @llvm.uadd.sat.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 2 x i64> @uqadd_imm_i64_low(<vscale x 2 x i64> %a) {
-; CHECK-LABEL: uqadd_imm_i64_low
+define <vscale x 2 x i64> @uqadd_i64_low(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: uqadd_i64_low
 ; CHECK: uqadd  z0.d, z0.d, #30
 ; CHECK-NEXT: ret
-  %res =  call <vscale x 2 x i64> @llvm.aarch64.sve.uqadd.imm.nxv2i64(<vscale x 2 x i64> %a,
-                                                                      i32 30)
+  %elt = insertelement <vscale x 2 x i64> undef, i64 30, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %res =  call <vscale x 2 x i64> @llvm.uadd.sat.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @uqadd_imm_i64_high(<vscale x 2 x i64> %a) {
-; CHECK-LABEL: uqadd_imm_i64_high
+define <vscale x 2 x i64> @uqadd_i64_high(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: uqadd_i64_high
 ; CHECK: uqadd  z0.d, z0.d, #1024
 ; CHECK-NEXT: ret
-  %res =  call <vscale x 2 x i64> @llvm.aarch64.sve.uqadd.imm.nxv2i64(<vscale x 2 x i64> %a,
-                                                                      i32 1024)
+  %elt = insertelement <vscale x 2 x i64> undef, i64 1024, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %res =  call <vscale x 2 x i64> @llvm.uadd.sat.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 16 x i8> @sqsub_imm_i8_low(<vscale x 16 x i8> %a) {
-; CHECK-LABEL: sqsub_imm_i8_low
+; SQSUB
+define <vscale x 16 x i8> @sqsub_i8_low(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: sqsub_i8_low
 ; CHECK: sqsub  z0.b, z0.b, #30
 ; CHECK-NEXT: ret
-  %res =  call <vscale x 16 x i8> @llvm.aarch64.sve.sqsub.imm.nxv16i8(<vscale x 16 x i8> %a,
-                                                                      i32 30)
+  %elt = insertelement <vscale x 16 x i8> undef, i8 30, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %res =  call <vscale x 16 x i8> @llvm.ssub.sat.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %splat)
   ret <vscale x 16 x i8> %res
 }
 
-define <vscale x 8 x i16> @sqsub_imm_i16_low(<vscale x 8 x i16> %a) {
-; CHECK-LABEL: sqsub_imm_i16_low
+define <vscale x 8 x i16> @sqsub_i16_low(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: sqsub_i16_low
 ; CHECK: sqsub  z0.h, z0.h, #30
 ; CHECK-NEXT: ret
-  %res =  call <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.imm.nxv8i16(<vscale x 8 x i16> %a,
-                                                                      i32 30)
+  %elt = insertelement <vscale x 8 x i16> undef, i16 30, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %res =  call <vscale x 8 x i16> @llvm.ssub.sat.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
   ret <vscale x 8 x i16> %res
 }
 
-define <vscale x 8 x i16> @sqsub_imm_i16_high(<vscale x 8 x i16> %a) {
-; CHECK-LABEL: sqsub_imm_i16_high
+define <vscale x 8 x i16> @sqsub_i16_high(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: sqsub_i16_high
 ; CHECK: sqsub  z0.h, z0.h, #1024
 ; CHECK-NEXT: ret
-  %res =  call <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.imm.nxv8i16(<vscale x 8 x i16> %a,
-                                                                      i32 1024)
+  %elt = insertelement <vscale x 8 x i16> undef, i16 1024, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %res =  call <vscale x 8 x i16> @llvm.ssub.sat.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
   ret <vscale x 8 x i16> %res
 }
 
-define <vscale x 4 x i32> @sqsub_imm_i32_low(<vscale x 4 x i32> %a) {
-; CHECK-LABEL: sqsub_imm_i32_low
+define <vscale x 4 x i32> @sqsub_i32_low(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: sqsub_i32_low
 ; CHECK: sqsub  z0.s, z0.s, #30
 ; CHECK-NEXT: ret
-  %res =  call <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.imm.nxv4i32(<vscale x 4 x i32> %a,
-                                                                      i32 30)
+  %elt = insertelement <vscale x 4 x i32> undef, i32 30, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %res =  call <vscale x 4 x i32> @llvm.ssub.sat.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 4 x i32> @sqsub_imm_i32_high(<vscale x 4 x i32> %a) {
-; CHECK-LABEL: sqsub_imm_i32_high
+define <vscale x 4 x i32> @sqsub_i32_high(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: sqsub_i32_high
 ; CHECK: sqsub  z0.s, z0.s, #1024
 ; CHECK-NEXT: ret
-  %res =  call <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.imm.nxv4i32(<vscale x 4 x i32> %a,
-                                                                      i32 1024)
+  %elt = insertelement <vscale x 4 x i32> undef, i32 1024, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %res =  call <vscale x 4 x i32> @llvm.ssub.sat.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 2 x i64> @sqsub_imm_i64_low(<vscale x 2 x i64> %a) {
-; CHECK-LABEL: sqsub_imm_i64_low
+define <vscale x 2 x i64> @sqsub_i64_low(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: sqsub_i64_low
 ; CHECK: sqsub  z0.d, z0.d, #30
 ; CHECK-NEXT: ret
-  %res =  call <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.imm.nxv2i64(<vscale x 2 x i64> %a,
-                                                                      i32 30)
+  %elt = insertelement <vscale x 2 x i64> undef, i64 30, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %res =  call <vscale x 2 x i64> @llvm.ssub.sat.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @sqsub_imm_i64_high(<vscale x 2 x i64> %a) {
-; CHECK-LABEL: sqsub_imm_i64_high
+define <vscale x 2 x i64> @sqsub_i64_high(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: sqsub_i64_high
 ; CHECK: sqsub  z0.d, z0.d, #1024
 ; CHECK-NEXT: ret
-  %res =  call <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.imm.nxv2i64(<vscale x 2 x i64> %a,
-                                                                      i32 1024)
+  %elt = insertelement <vscale x 2 x i64> undef, i64 1024, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %res =  call <vscale x 2 x i64> @llvm.ssub.sat.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 16 x i8> @uqsub_imm_i8_low(<vscale x 16 x i8> %a) {
-; CHECK-LABEL: uqsub_imm_i8_low
+; UQSUB
+define <vscale x 16 x i8> @uqsub_i8_low(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: uqsub_i8_low
 ; CHECK: uqsub  z0.b, z0.b, #30
 ; CHECK-NEXT: ret
-  %res =  call <vscale x 16 x i8> @llvm.aarch64.sve.uqsub.imm.nxv16i8(<vscale x 16 x i8> %a,
-                                                                      i32 30)
+  %elt = insertelement <vscale x 16 x i8> undef, i8 30, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %res =  call <vscale x 16 x i8> @llvm.usub.sat.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %splat)
   ret <vscale x 16 x i8> %res
 }
 
-define <vscale x 8 x i16> @uqsub_imm_i16_low(<vscale x 8 x i16> %a) {
-; CHECK-LABEL: uqsub_imm_i16_low
+define <vscale x 8 x i16> @uqsub_i16_low(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: uqsub_i16_low
 ; CHECK: uqsub  z0.h, z0.h, #30
 ; CHECK-NEXT: ret
-  %res =  call <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.imm.nxv8i16(<vscale x 8 x i16> %a,
-                                                                      i32 30)
+  %elt = insertelement <vscale x 8 x i16> undef, i16 30, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %res =  call <vscale x 8 x i16> @llvm.usub.sat.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
   ret <vscale x 8 x i16> %res
 }
 
-define <vscale x 8 x i16> @uqsub_imm_i16_high(<vscale x 8 x i16> %a) {
-; CHECK-LABEL: uqsub_imm_i16_high
+define <vscale x 8 x i16> @uqsub_i16_high(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: uqsub_i16_high
 ; CHECK: uqsub  z0.h, z0.h, #1024
 ; CHECK-NEXT: ret
-  %res =  call <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.imm.nxv8i16(<vscale x 8 x i16> %a,
-                                                                      i32 1024)
+  %elt = insertelement <vscale x 8 x i16> undef, i16 1024, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %res =  call <vscale x 8 x i16> @llvm.usub.sat.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
   ret <vscale x 8 x i16> %res
 }
 
-define <vscale x 4 x i32> @uqsub_imm_i32_low(<vscale x 4 x i32> %a) {
-; CHECK-LABEL: uqsub_imm_i32_low
+define <vscale x 4 x i32> @uqsub_i32_low(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: uqsub_i32_low
 ; CHECK: uqsub  z0.s, z0.s, #30
 ; CHECK-NEXT: ret
-  %res =  call <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.imm.nxv4i32(<vscale x 4 x i32> %a,
-                                                                      i32 30)
+  %elt = insertelement <vscale x 4 x i32> undef, i32 30, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %res =  call <vscale x 4 x i32> @llvm.usub.sat.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 4 x i32> @uqsub_imm_i32_high(<vscale x 4 x i32> %a) {
-; CHECK-LABEL: uqsub_imm_i32_high
+define <vscale x 4 x i32> @uqsub_i32_high(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: uqsub_i32_high
 ; CHECK: uqsub  z0.s, z0.s, #1024
 ; CHECK-NEXT: ret
-  %res =  call <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.imm.nxv4i32(<vscale x 4 x i32> %a,
-                                                                      i32 1024)
+  %elt = insertelement <vscale x 4 x i32> undef, i32 1024, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %res =  call <vscale x 4 x i32> @llvm.usub.sat.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 2 x i64> @uqsub_imm_i64_low(<vscale x 2 x i64> %a) {
-; CHECK-LABEL: uqsub_imm_i64_low
+define <vscale x 2 x i64> @uqsub_i64_low(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: uqsub_i64_low
 ; CHECK: uqsub  z0.d, z0.d, #30
 ; CHECK-NEXT: ret
-  %res =  call <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.imm.nxv2i64(<vscale x 2 x i64> %a,
-                                                                      i32 30)
+  %elt = insertelement <vscale x 2 x i64> undef, i64 30, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %res =  call <vscale x 2 x i64> @llvm.usub.sat.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @uqsub_imm_i64_high(<vscale x 2 x i64> %a) {
-; CHECK-LABEL: uqsub_imm_i64_high
+define <vscale x 2 x i64> @uqsub_i64_high(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: uqsub_i64_high
 ; CHECK: uqsub  z0.d, z0.d, #1024
 ; CHECK-NEXT: ret
-  %res =  call <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.imm.nxv2i64(<vscale x 2 x i64> %a,
-                                                                      i32 1024)
+  %elt = insertelement <vscale x 2 x i64> undef, i64 1024, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %res =  call <vscale x 2 x i64> @llvm.usub.sat.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
   ret <vscale x 2 x i64> %res
 }
 
-declare <vscale x 16 x i8> @llvm.aarch64.sve.add.imm.nxv16i8(<vscale x 16 x i8>, i32)
-declare <vscale x 8 x i16> @llvm.aarch64.sve.add.imm.nxv8i16(<vscale x 8 x i16>, i32)
-declare <vscale x 4 x i32> @llvm.aarch64.sve.add.imm.nxv4i32(<vscale x 4 x i32>, i32)
-declare <vscale x 2 x i64> @llvm.aarch64.sve.add.imm.nxv2i64(<vscale x 2 x i64>, i32)
-declare <vscale x 16 x i8> @llvm.aarch64.sve.sub.imm.nxv16i8(<vscale x 16 x i8>, i32)
-declare <vscale x 8 x i16> @llvm.aarch64.sve.sub.imm.nxv8i16(<vscale x 8 x i16>, i32)
-declare <vscale x 4 x i32> @llvm.aarch64.sve.sub.imm.nxv4i32(<vscale x 4 x i32>, i32)
-declare <vscale x 2 x i64> @llvm.aarch64.sve.sub.imm.nxv2i64(<vscale x 2 x i64>, i32)
-declare <vscale x 16 x i8> @llvm.aarch64.sve.subr.imm.nxv16i8(<vscale x 16 x i8>, i32)
-declare <vscale x 8 x i16> @llvm.aarch64.sve.subr.imm.nxv8i16(<vscale x 8 x i16>, i32)
-declare <vscale x 4 x i32> @llvm.aarch64.sve.subr.imm.nxv4i32(<vscale x 4 x i32>, i32)
-declare <vscale x 2 x i64> @llvm.aarch64.sve.subr.imm.nxv2i64(<vscale x 2 x i64>, i32)
-declare <vscale x 16 x i8> @llvm.aarch64.sve.sqadd.imm.nxv16i8(<vscale x 16 x i8>, i32)
-declare <vscale x 8 x i16> @llvm.aarch64.sve.sqadd.imm.nxv8i16(<vscale x 8 x i16>, i32)
-declare <vscale x 4 x i32> @llvm.aarch64.sve.sqadd.imm.nxv4i32(<vscale x 4 x i32>, i32)
-declare <vscale x 2 x i64> @llvm.aarch64.sve.sqadd.imm.nxv2i64(<vscale x 2 x i64>, i32)
-declare <vscale x 16 x i8> @llvm.aarch64.sve.uqadd.imm.nxv16i8(<vscale x 16 x i8>, i32)
-declare <vscale x 8 x i16> @llvm.aarch64.sve.uqadd.imm.nxv8i16(<vscale x 8 x i16>, i32)
-declare <vscale x 4 x i32> @llvm.aarch64.sve.uqadd.imm.nxv4i32(<vscale x 4 x i32>, i32)
-declare <vscale x 2 x i64> @llvm.aarch64.sve.uqadd.imm.nxv2i64(<vscale x 2 x i64>, i32)
-declare <vscale x 16 x i8> @llvm.aarch64.sve.sqsub.imm.nxv16i8(<vscale x 16 x i8>, i32)
-declare <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.imm.nxv8i16(<vscale x 8 x i16>, i32)
-declare <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.imm.nxv4i32(<vscale x 4 x i32>, i32)
-declare <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.imm.nxv2i64(<vscale x 2 x i64>, i32)
-declare <vscale x 16 x i8> @llvm.aarch64.sve.uqsub.imm.nxv16i8(<vscale x 16 x i8>, i32)
-declare <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.imm.nxv8i16(<vscale x 8 x i16>, i32)
-declare <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.imm.nxv4i32(<vscale x 4 x i32>, i32)
-declare <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.imm.nxv2i64(<vscale x 2 x i64>, i32)
+declare <vscale x 16 x i8> @llvm.sadd.sat.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.sadd.sat.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.sadd.sat.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.sadd.sat.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+declare <vscale x 16 x i8> @llvm.uadd.sat.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.uadd.sat.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.uadd.sat.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.uadd.sat.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+declare <vscale x 16 x i8> @llvm.ssub.sat.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.ssub.sat.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.ssub.sat.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.ssub.sat.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+declare <vscale x 16 x i8> @llvm.usub.sat.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.usub.sat.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.usub.sat.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.usub.sat.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)

diff --git a/llvm/test/CodeGen/AArch64/sve-int-log-imm.ll b/llvm/test/CodeGen/AArch64/sve-int-log-imm.ll
index 0b95300071ee..52b56d5adb5e 100644
--- a/llvm/test/CodeGen/AArch64/sve-int-log-imm.ll
+++ b/llvm/test/CodeGen/AArch64/sve-int-log-imm.ll
@@ -1,11 +1,17 @@
 ; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
 
+;
+; SVE Logical Vector Immediate Unpredicated CodeGen
+;
+
+; ORR
 define <vscale x 16 x i8> @orr_i8(<vscale x 16 x i8> %a) {
 ; CHECK-LABEL: orr_i8:
 ; CHECK: orr z0.b, z0.b, #0xf
 ; CHECK-NEXT: ret
-  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.orr.imm.nxv16i8(<vscale x 16 x i8> %a, 
-                                                                   i64 15)
+  %elt = insertelement <vscale x 16 x i8> undef, i8 15, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %res = or <vscale x 16 x i8> %a, %splat
   ret <vscale x 16 x i8> %res
 }
 
@@ -13,8 +19,9 @@ define <vscale x 8 x i16> @orr_i16(<vscale x 8 x i16> %a) {
 ; CHECK-LABEL: orr_i16:
 ; CHECK: orr z0.h, z0.h, #0xfc07
 ; CHECK-NEXT: ret
-  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.orr.imm.nxv8i16(<vscale x 8 x i16> %a, 
-                                                                   i64 64519)
+  %elt = insertelement <vscale x 8 x i16> undef, i16 64519, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %res = or <vscale x 8 x i16> %a, %splat
   ret <vscale x 8 x i16> %res
 }
 
@@ -22,8 +29,9 @@ define <vscale x 4 x i32> @orr_i32(<vscale x 4 x i32> %a) {
 ; CHECK-LABEL: orr_i32:
 ; CHECK: orr z0.s, z0.s, #0xffff00
 ; CHECK-NEXT: ret
-  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.orr.imm.nxv4i32(<vscale x 4 x i32> %a, 
-                                                                   i64 16776960)
+  %elt = insertelement <vscale x 4 x i32> undef, i32 16776960, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %res = or <vscale x 4 x i32> %a, %splat
   ret <vscale x 4 x i32> %res
 }
 
@@ -31,17 +39,20 @@ define <vscale x 2 x i64> @orr_i64(<vscale x 2 x i64> %a) {
 ; CHECK-LABEL: orr_i64:
 ; CHECK: orr z0.d, z0.d, #0xfffc000000000000
 ; CHECK-NEXT: ret
-  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.orr.imm.nxv2i64(<vscale x 2 x i64> %a, 
-                                                                   i64 18445618173802708992)
+  %elt = insertelement <vscale x 2 x i64> undef, i64 18445618173802708992, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %res = or <vscale x 2 x i64> %a, %splat
   ret <vscale x 2 x i64> %res
 }
 
+; EOR
 define <vscale x 16 x i8> @eor_i8(<vscale x 16 x i8> %a) {
 ; CHECK-LABEL: eor_i8:
 ; CHECK: eor z0.b, z0.b, #0xf
 ; CHECK-NEXT: ret
-  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.eor.imm.nxv16i8(<vscale x 16 x i8> %a, 
-                                                                   i64 15)
+  %elt = insertelement <vscale x 16 x i8> undef, i8 15, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %res = xor <vscale x 16 x i8> %a, %splat
   ret <vscale x 16 x i8> %res
 }
 
@@ -49,8 +60,9 @@ define <vscale x 8 x i16> @eor_i16(<vscale x 8 x i16> %a) {
 ; CHECK-LABEL: eor_i16:
 ; CHECK: eor z0.h, z0.h, #0xfc07
 ; CHECK-NEXT: ret
-  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.eor.imm.nxv8i16(<vscale x 8 x i16> %a, 
-                                                                   i64 64519)
+  %elt = insertelement <vscale x 8 x i16> undef, i16 64519, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %res = xor <vscale x 8 x i16> %a, %splat
   ret <vscale x 8 x i16> %res
 }
 
@@ -58,8 +70,9 @@ define <vscale x 4 x i32> @eor_i32(<vscale x 4 x i32> %a) {
 ; CHECK-LABEL: eor_i32:
 ; CHECK: eor z0.s, z0.s, #0xffff00
 ; CHECK-NEXT: ret
-  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.eor.imm.nxv4i32(<vscale x 4 x i32> %a, 
-                                                                   i64 16776960)
+  %elt = insertelement <vscale x 4 x i32> undef, i32 16776960, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %res = xor <vscale x 4 x i32> %a, %splat
   ret <vscale x 4 x i32> %res
 }
 
@@ -67,17 +80,20 @@ define <vscale x 2 x i64> @eor_i64(<vscale x 2 x i64> %a) {
 ; CHECK-LABEL: eor_i64:
 ; CHECK: eor z0.d, z0.d, #0xfffc000000000000
 ; CHECK-NEXT: ret
-  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.eor.imm.nxv2i64(<vscale x 2 x i64> %a, 
-                                                                   i64 18445618173802708992)
+  %elt = insertelement <vscale x 2 x i64> undef, i64 18445618173802708992, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %res = xor <vscale x 2 x i64> %a, %splat
   ret <vscale x 2 x i64> %res
 }
 
+; AND
 define <vscale x 16 x i8> @and_i8(<vscale x 16 x i8> %a) {
 ; CHECK-LABEL: and_i8:
 ; CHECK: and z0.b, z0.b, #0xf
 ; CHECK-NEXT: ret
-  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.and.imm.nxv16i8(<vscale x 16 x i8> %a, 
-                                                                   i64 15)
+  %elt = insertelement <vscale x 16 x i8> undef, i8 15, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %res = and <vscale x 16 x i8> %a, %splat
   ret <vscale x 16 x i8> %res
 }
 
@@ -85,8 +101,9 @@ define <vscale x 8 x i16> @and_i16(<vscale x 8 x i16> %a) {
 ; CHECK-LABEL: and_i16:
 ; CHECK: and z0.h, z0.h, #0xfc07
 ; CHECK-NEXT: ret
-  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.and.imm.nxv8i16(<vscale x 8 x i16> %a, 
-                                                                   i64 64519)
+  %elt = insertelement <vscale x 8 x i16> undef, i16 64519, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %res = and <vscale x 8 x i16> %a, %splat
   ret <vscale x 8 x i16> %res
 }
 
@@ -94,8 +111,9 @@ define <vscale x 4 x i32> @and_i32(<vscale x 4 x i32> %a) {
 ; CHECK-LABEL: and_i32:
 ; CHECK: and z0.s, z0.s, #0xffff00
 ; CHECK-NEXT: ret
-  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.and.imm.nxv4i32(<vscale x 4 x i32> %a, 
-                                                                   i64 16776960)
+  %elt = insertelement <vscale x 4 x i32> undef, i32 16776960, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %res = and <vscale x 4 x i32> %a, %splat
   ret <vscale x 4 x i32> %res
 }
 
@@ -103,20 +121,8 @@ define <vscale x 2 x i64> @and_i64(<vscale x 2 x i64> %a) {
 ; CHECK-LABEL: and_i64:
 ; CHECK: and z0.d, z0.d, #0xfffc000000000000
 ; CHECK-NEXT: ret
-  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.and.imm.nxv2i64(<vscale x 2 x i64> %a,
-                                                                   i64 18445618173802708992)
+  %elt = insertelement <vscale x 2 x i64> undef, i64 18445618173802708992, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %res = and <vscale x 2 x i64> %a, %splat
   ret <vscale x 2 x i64> %res
 }
-
-declare <vscale x 16 x i8> @llvm.aarch64.sve.orr.imm.nxv16i8(<vscale x 16 x i8>, i64)
-declare <vscale x 8 x i16> @llvm.aarch64.sve.orr.imm.nxv8i16(<vscale x 8 x i16>, i64)
-declare <vscale x 4 x i32> @llvm.aarch64.sve.orr.imm.nxv4i32(<vscale x 4 x i32>, i64)
-declare <vscale x 2 x i64> @llvm.aarch64.sve.orr.imm.nxv2i64(<vscale x 2 x i64>, i64)
-declare <vscale x 16 x i8> @llvm.aarch64.sve.eor.imm.nxv16i8(<vscale x 16 x i8>, i64)
-declare <vscale x 8 x i16> @llvm.aarch64.sve.eor.imm.nxv8i16(<vscale x 8 x i16>, i64)
-declare <vscale x 4 x i32> @llvm.aarch64.sve.eor.imm.nxv4i32(<vscale x 4 x i32>, i64)
-declare <vscale x 2 x i64> @llvm.aarch64.sve.eor.imm.nxv2i64(<vscale x 2 x i64>, i64)
-declare <vscale x 16 x i8> @llvm.aarch64.sve.and.imm.nxv16i8(<vscale x 16 x i8>, i64)
-declare <vscale x 8 x i16> @llvm.aarch64.sve.and.imm.nxv8i16(<vscale x 8 x i16>, i64)
-declare <vscale x 4 x i32> @llvm.aarch64.sve.and.imm.nxv4i32(<vscale x 4 x i32>, i64)
-declare <vscale x 2 x i64> @llvm.aarch64.sve.and.imm.nxv2i64(<vscale x 2 x i64>, i64)
