[llvm] 86c4ef5 - [AArch64] Add patterns for sub from add negative immediates (#156024)

via llvm-commits <llvm-commits at lists.llvm.org>
Sun Aug 31 07:12:37 PDT 2025


Author: David Green
Date: 2025-08-31T15:12:33+01:00
New Revision: 86c4ef506d96468d3f4c196a61767c40ce180d2e

URL: https://github.com/llvm/llvm-project/commit/86c4ef506d96468d3f4c196a61767c40ce180d2e
DIFF: https://github.com/llvm/llvm-project/commit/86c4ef506d96468d3f4c196a61767c40ce180d2e.diff

LOG: [AArch64] Add patterns for sub from add negative immediates (#156024)

`sub 3` will be canonicalized in LLVM to `add -3`. This adds tablegen
patterns that match an add of a negative immediate so that we can still
generate the sub-immediate SVE instructions.
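
As a small illustration (taken from the `addnve_i32_low` test updated
below), an add of a negative splat immediate now selects the
sub-immediate form directly:

```llvm
; Previously: mov z1.s, #-30
;             add z0.s, z0.s, z1.s
; Now:        sub z0.s, z0.s, #30
define <vscale x 4 x i32> @addnve_i32_low(<vscale x 4 x i32> %a) {
  %res = add <vscale x 4 x i32> %a, splat(i32 -30)
  ret <vscale x 4 x i32> %res
}
```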

The alternative is to add an isel combine, which seemed to work but
created problems for the mad and index patterns. This version still
needs to add a lower-than-default Complexity to the ComplexPatterns to
ensure that index is selected over sub-imm + index, as the default
Complexity on ComplexPatterns is quite high.
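
The kind of case this affects can be seen in the updated
`v4i32_out_range_start` test below, where the codegen changes from
`index` plus an immediate `add` to an `index` starting from a register:

```llvm
; Old codegen: index z0.s, #0, #1
;              add   z0.s, z0.s, #16
; New codegen: mov   w8, #16
;              index z0.s, w8, #1
define <4 x i32> @v4i32_out_range_start() {
  ret <4 x i32> <i32 16, i32 17, i32 18, i32 19>
}
```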

Fixes #155928

Added: 
    

Modified: 
    llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
    llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
    llvm/lib/Target/AArch64/SVEInstrFormats.td
    llvm/test/CodeGen/AArch64/sve-fixed-length-bitselect.ll
    llvm/test/CodeGen/AArch64/sve-index-const-step-vector.ll
    llvm/test/CodeGen/AArch64/sve-int-imm.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bitselect.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-build-vector.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index bc786f415b554..6fdc981fc21a5 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -246,9 +246,9 @@ class AArch64DAGToDAGISel : public SelectionDAGISel {
     return false;
   }
 
-  template<MVT::SimpleValueType VT>
+  template <MVT::SimpleValueType VT, bool Negate>
   bool SelectSVEAddSubImm(SDValue N, SDValue &Imm, SDValue &Shift) {
-    return SelectSVEAddSubImm(N, VT, Imm, Shift);
+    return SelectSVEAddSubImm(N, VT, Imm, Shift, Negate);
   }
 
   template <MVT::SimpleValueType VT, bool Negate>
@@ -489,7 +489,8 @@ class AArch64DAGToDAGISel : public SelectionDAGISel {
 
   bool SelectCMP_SWAP(SDNode *N);
 
-  bool SelectSVEAddSubImm(SDValue N, MVT VT, SDValue &Imm, SDValue &Shift);
+  bool SelectSVEAddSubImm(SDValue N, MVT VT, SDValue &Imm, SDValue &Shift,
+                          bool Negate);
   bool SelectSVEAddSubSSatImm(SDValue N, MVT VT, SDValue &Imm, SDValue &Shift,
                               bool Negate);
   bool SelectSVECpyDupImm(SDValue N, MVT VT, SDValue &Imm, SDValue &Shift);
@@ -4227,35 +4228,36 @@ bool AArch64DAGToDAGISel::SelectCMP_SWAP(SDNode *N) {
 }
 
 bool AArch64DAGToDAGISel::SelectSVEAddSubImm(SDValue N, MVT VT, SDValue &Imm,
-                                             SDValue &Shift) {
+                                             SDValue &Shift, bool Negate) {
   if (!isa<ConstantSDNode>(N))
     return false;
 
   SDLoc DL(N);
-  uint64_t Val = cast<ConstantSDNode>(N)
-                     ->getAPIntValue()
-                     .trunc(VT.getFixedSizeInBits())
-                     .getZExtValue();
+  APInt Val =
+      cast<ConstantSDNode>(N)->getAPIntValue().trunc(VT.getFixedSizeInBits());
+
+  if (Negate)
+    Val = -Val;
 
   switch (VT.SimpleTy) {
   case MVT::i8:
     // All immediates are supported.
     Shift = CurDAG->getTargetConstant(0, DL, MVT::i32);
-    Imm = CurDAG->getTargetConstant(Val, DL, MVT::i32);
+    Imm = CurDAG->getTargetConstant(Val.getZExtValue(), DL, MVT::i32);
     return true;
   case MVT::i16:
   case MVT::i32:
   case MVT::i64:
     // Support 8bit unsigned immediates.
-    if (Val <= 255) {
+    if ((Val & ~0xff) == 0) {
       Shift = CurDAG->getTargetConstant(0, DL, MVT::i32);
-      Imm = CurDAG->getTargetConstant(Val, DL, MVT::i32);
+      Imm = CurDAG->getTargetConstant(Val.getZExtValue(), DL, MVT::i32);
       return true;
     }
     // Support 16bit unsigned immediates that are a multiple of 256.
-    if (Val <= 65280 && Val % 256 == 0) {
+    if ((Val & ~0xff00) == 0) {
       Shift = CurDAG->getTargetConstant(8, DL, MVT::i32);
-      Imm = CurDAG->getTargetConstant(Val >> 8, DL, MVT::i32);
+      Imm = CurDAG->getTargetConstant(Val.lshr(8).getZExtValue(), DL, MVT::i32);
       return true;
     }
     break;

diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index eeb47b4d99750..bc65af2d190f6 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -650,7 +650,7 @@ let Predicates = [HasSVE_or_SME, UseExperimentalZeroingPseudos] in {
 
 let Predicates = [HasSVE_or_SME] in {
   defm ADD_ZI   : sve_int_arith_imm0<0b000, "add", add>;
-  defm SUB_ZI   : sve_int_arith_imm0<0b001, "sub", sub>;
+  defm SUB_ZI   : sve_int_arith_imm0<0b001, "sub", sub, add>;
   defm SUBR_ZI  : sve_int_arith_imm0<0b011, "subr", AArch64subr>;
   defm SQADD_ZI : sve_int_arith_imm0_ssat<0b100, "sqadd", saddsat, ssubsat>;
   defm UQADD_ZI : sve_int_arith_imm0<0b101, "uqadd", uaddsat>;

diff --git a/llvm/lib/Target/AArch64/SVEInstrFormats.td b/llvm/lib/Target/AArch64/SVEInstrFormats.td
index 13599f505dd4e..74e4a7feb49d0 100644
--- a/llvm/lib/Target/AArch64/SVEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -315,10 +315,16 @@ def addsub_imm8_opt_lsl_i16 : imm8_opt_lsl<16, "uint16_t", SVEAddSubImmOperand16
 def addsub_imm8_opt_lsl_i32 : imm8_opt_lsl<32, "uint32_t", SVEAddSubImmOperand32>;
 def addsub_imm8_opt_lsl_i64 : imm8_opt_lsl<64, "uint64_t", SVEAddSubImmOperand64>;
 
-def SVEAddSubImm8Pat  : ComplexPattern<i32, 2, "SelectSVEAddSubImm<MVT::i8>", []>;
-def SVEAddSubImm16Pat : ComplexPattern<i32, 2, "SelectSVEAddSubImm<MVT::i16>", []>;
-def SVEAddSubImm32Pat : ComplexPattern<i32, 2, "SelectSVEAddSubImm<MVT::i32>", []>;
-def SVEAddSubImm64Pat : ComplexPattern<i64, 2, "SelectSVEAddSubImm<MVT::i64>", []>;
+let Complexity = 1 in {
+def SVEAddSubImm8Pat  : ComplexPattern<i32, 2, "SelectSVEAddSubImm<MVT::i8, false>", []>;
+def SVEAddSubImm16Pat : ComplexPattern<i32, 2, "SelectSVEAddSubImm<MVT::i16, false>", []>;
+def SVEAddSubImm32Pat : ComplexPattern<i32, 2, "SelectSVEAddSubImm<MVT::i32, false>", []>;
+def SVEAddSubImm64Pat : ComplexPattern<i64, 2, "SelectSVEAddSubImm<MVT::i64, false>", []>;
+
+def SVEAddSubNegImm8Pat  : ComplexPattern<i32, 2, "SelectSVEAddSubImm<MVT::i8, true>", []>;
+def SVEAddSubNegImm16Pat : ComplexPattern<i32, 2, "SelectSVEAddSubImm<MVT::i16, true>", []>;
+def SVEAddSubNegImm32Pat : ComplexPattern<i32, 2, "SelectSVEAddSubImm<MVT::i32, true>", []>;
+def SVEAddSubNegImm64Pat : ComplexPattern<i64, 2, "SelectSVEAddSubImm<MVT::i64, true>", []>;
 
 def SVEAddSubSSatNegImm8Pat  : ComplexPattern<i32, 2, "SelectSVEAddSubSSatImm<MVT::i8, true>", []>;
 def SVEAddSubSSatNegImm16Pat : ComplexPattern<i32, 2, "SelectSVEAddSubSSatImm<MVT::i16, true>", []>;
@@ -329,6 +335,7 @@ def SVEAddSubSSatPosImm8Pat  : ComplexPattern<i32, 2, "SelectSVEAddSubSSatImm<MV
 def SVEAddSubSSatPosImm16Pat : ComplexPattern<i32, 2, "SelectSVEAddSubSSatImm<MVT::i16, false>", []>;
 def SVEAddSubSSatPosImm32Pat : ComplexPattern<i32, 2, "SelectSVEAddSubSSatImm<MVT::i32, false>", []>;
 def SVEAddSubSSatPosImm64Pat : ComplexPattern<i64, 2, "SelectSVEAddSubSSatImm<MVT::i64, false>", []>;
+} // Complexity = 1
 
 def SVECpyDupImm8Pat  : ComplexPattern<i32, 2, "SelectSVECpyDupImm<MVT::i8>", []>;
 def SVECpyDupImm16Pat : ComplexPattern<i32, 2, "SelectSVECpyDupImm<MVT::i16>", []>;
@@ -5221,7 +5228,8 @@ class sve_int_arith_imm0<bits<2> sz8_64, bits<3> opc, string asm,
   let hasSideEffects = 0;
 }
 
-multiclass sve_int_arith_imm0<bits<3> opc, string asm, SDPatternOperator op> {
+multiclass sve_int_arith_imm0<bits<3> opc, string asm, SDPatternOperator op,
+                              SDPatternOperator inv_op = null_frag> {
   def _B : sve_int_arith_imm0<0b00, opc, asm, ZPR8,  addsub_imm8_opt_lsl_i8>;
   def _H : sve_int_arith_imm0<0b01, opc, asm, ZPR16, addsub_imm8_opt_lsl_i16>;
   def _S : sve_int_arith_imm0<0b10, opc, asm, ZPR32, addsub_imm8_opt_lsl_i32>;
@@ -5231,6 +5239,12 @@ multiclass sve_int_arith_imm0<bits<3> opc, string asm, SDPatternOperator op> {
   def : SVE_1_Op_Imm_OptLsl_Pat<nxv8i16, op, ZPR16, i32, SVEAddSubImm16Pat, !cast<Instruction>(NAME # _H)>;
   def : SVE_1_Op_Imm_OptLsl_Pat<nxv4i32, op, ZPR32, i32, SVEAddSubImm32Pat, !cast<Instruction>(NAME # _S)>;
   def : SVE_1_Op_Imm_OptLsl_Pat<nxv2i64, op, ZPR64, i64, SVEAddSubImm64Pat, !cast<Instruction>(NAME # _D)>;
+
+  // Extra patterns for add(x, splat(-ve)) -> sub(x, +ve). There is no i8
+  // pattern as all i8 constants can be handled by an add.
+  def : SVE_1_Op_Imm_OptLsl_Pat<nxv8i16, inv_op, ZPR16, i32, SVEAddSubNegImm16Pat, !cast<Instruction>(NAME # _H)>;
+  def : SVE_1_Op_Imm_OptLsl_Pat<nxv4i32, inv_op, ZPR32, i32, SVEAddSubNegImm32Pat, !cast<Instruction>(NAME # _S)>;
+  def : SVE_1_Op_Imm_OptLsl_Pat<nxv2i64, inv_op, ZPR64, i64, SVEAddSubNegImm64Pat, !cast<Instruction>(NAME # _D)>;
 }
 
 multiclass sve_int_arith_imm0_ssat<bits<3> opc, string asm, SDPatternOperator op,

diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-bitselect.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-bitselect.ll
index fb494afa11de2..258e399018ba8 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-bitselect.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-bitselect.ll
@@ -13,15 +13,15 @@ define void @fixed_bitselect_v8i32(ptr %pre_cond_ptr, ptr %left_ptr, ptr %right_
 ; CHECK-LABEL: fixed_bitselect_v8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s, vl8
-; CHECK-NEXT:    mov z1.s, #-1 // =0xffffffffffffffff
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
-; CHECK-NEXT:    ld1w { z2.s }, p0/z, [x1]
+; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x1]
 ; CHECK-NEXT:    ld1w { z3.s }, p0/z, [x2]
-; CHECK-NEXT:    add z1.s, z0.s, z1.s
-; CHECK-NEXT:    subr z0.s, z0.s, #0 // =0x0
-; CHECK-NEXT:    and z0.d, z0.d, z2.d
-; CHECK-NEXT:    and z1.d, z1.d, z3.d
-; CHECK-NEXT:    orr z0.d, z1.d, z0.d
+; CHECK-NEXT:    mov z2.d, z0.d
+; CHECK-NEXT:    sub z0.s, z0.s, #1 // =0x1
+; CHECK-NEXT:    subr z2.s, z2.s, #0 // =0x0
+; CHECK-NEXT:    and z0.d, z0.d, z3.d
+; CHECK-NEXT:    and z1.d, z2.d, z1.d
+; CHECK-NEXT:    orr z0.d, z0.d, z1.d
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x3]
 ; CHECK-NEXT:    ret
   %pre_cond = load <8 x i32>, ptr %pre_cond_ptr

diff --git a/llvm/test/CodeGen/AArch64/sve-index-const-step-vector.ll b/llvm/test/CodeGen/AArch64/sve-index-const-step-vector.ll
index 433ddbd4a261b..cf2ae02c14b18 100644
--- a/llvm/test/CodeGen/AArch64/sve-index-const-step-vector.ll
+++ b/llvm/test/CodeGen/AArch64/sve-index-const-step-vector.ll
@@ -94,8 +94,8 @@ define <4 x i32> @v4i32_neg_immediates() #0 {
 define <4 x i32> @v4i32_out_range_start() #0 {
 ; CHECK-LABEL: v4i32_out_range_start:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    index z0.s, #0, #1
-; CHECK-NEXT:    add z0.s, z0.s, #16 // =0x10
+; CHECK-NEXT:    mov w8, #16 // =0x10
+; CHECK-NEXT:    index z0.s, w8, #1
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
   ret <4 x i32> <i32 16, i32 17, i32 18, i32 19>

diff --git a/llvm/test/CodeGen/AArch64/sve-int-imm.ll b/llvm/test/CodeGen/AArch64/sve-int-imm.ll
index e34f4840f517c..985b7b9597705 100644
--- a/llvm/test/CodeGen/AArch64/sve-int-imm.ll
+++ b/llvm/test/CodeGen/AArch64/sve-int-imm.ll
@@ -229,8 +229,7 @@ define <vscale x 16 x i8> @addnve_i8_low(<vscale x 16 x i8> %a) {
 define <vscale x 8 x i16> @addnve_i16_low(<vscale x 8 x i16> %a) {
 ; CHECK-LABEL: addnve_i16_low:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z1.h, #-30 // =0xffffffffffffffe2
-; CHECK-NEXT:    add z0.h, z0.h, z1.h
+; CHECK-NEXT:    sub z0.h, z0.h, #30 // =0x1e
 ; CHECK-NEXT:    ret
   %res =  add <vscale x 8 x i16> %a, splat(i16 -30)
   ret <vscale x 8 x i16> %res
@@ -248,8 +247,7 @@ define <vscale x 8 x i16> @addnve_i16_high(<vscale x 8 x i16> %a) {
 define <vscale x 4 x i32> @addnve_i32_low(<vscale x 4 x i32> %a) {
 ; CHECK-LABEL: addnve_i32_low:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z1.s, #-30 // =0xffffffffffffffe2
-; CHECK-NEXT:    add z0.s, z0.s, z1.s
+; CHECK-NEXT:    sub z0.s, z0.s, #30 // =0x1e
 ; CHECK-NEXT:    ret
   %res = add <vscale x 4 x i32> %a, splat(i32 -30)
   ret <vscale x 4 x i32> %res
@@ -258,8 +256,7 @@ define <vscale x 4 x i32> @addnve_i32_low(<vscale x 4 x i32> %a) {
 define <vscale x 4 x i32> @addnve_i32_high(<vscale x 4 x i32> %a) {
 ; CHECK-LABEL: addnve_i32_high:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z1.s, #-1024 // =0xfffffffffffffc00
-; CHECK-NEXT:    add z0.s, z0.s, z1.s
+; CHECK-NEXT:    sub z0.s, z0.s, #1024 // =0x400
 ; CHECK-NEXT:    ret
   %res =  add <vscale x 4 x i32> %a, splat(i32 -1024)
   ret <vscale x 4 x i32> %res
@@ -268,8 +265,7 @@ define <vscale x 4 x i32> @addnve_i32_high(<vscale x 4 x i32> %a) {
 define <vscale x 2 x i64> @addnve_i64_low(<vscale x 2 x i64> %a) {
 ; CHECK-LABEL: addnve_i64_low:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z1.d, #-30 // =0xffffffffffffffe2
-; CHECK-NEXT:    add z0.d, z0.d, z1.d
+; CHECK-NEXT:    sub z0.d, z0.d, #30 // =0x1e
 ; CHECK-NEXT:    ret
   %res =  add <vscale x 2 x i64> %a, splat(i64 -30)
   ret <vscale x 2 x i64> %res
@@ -278,8 +274,7 @@ define <vscale x 2 x i64> @addnve_i64_low(<vscale x 2 x i64> %a) {
 define <vscale x 2 x i64> @addnve_i64_high(<vscale x 2 x i64> %a) {
 ; CHECK-LABEL: addnve_i64_high:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z1.d, #-1024 // =0xfffffffffffffc00
-; CHECK-NEXT:    add z0.d, z0.d, z1.d
+; CHECK-NEXT:    sub z0.d, z0.d, #1024 // =0x400
 ; CHECK-NEXT:    ret
   %res = add <vscale x 2 x i64> %a, splat(i64 -1024)
   ret <vscale x 2 x i64> %res

diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bitselect.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bitselect.ll
index d29e43509dfe9..71396da004002 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bitselect.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bitselect.ll
@@ -14,20 +14,21 @@ target triple = "aarch64"
 define <8 x i32> @fixed_bitselect_v8i32(ptr %pre_cond_ptr, ptr %left_ptr, ptr %right_ptr) {
 ; CHECK-LABEL: fixed_bitselect_v8i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z0.s, #-1 // =0xffffffffffffffff
-; CHECK-NEXT:    ldp q2, q1, [x0]
+; CHECK-NEXT:    ldp q1, q0, [x0]
 ; CHECK-NEXT:    ldp q5, q4, [x1]
 ; CHECK-NEXT:    ldp q6, q7, [x2]
-; CHECK-NEXT:    add z3.s, z1.s, z0.s
-; CHECK-NEXT:    subr z1.s, z1.s, #0 // =0x0
-; CHECK-NEXT:    add z0.s, z2.s, z0.s
+; CHECK-NEXT:    mov z2.d, z0.d
+; CHECK-NEXT:    mov z3.d, z1.d
+; CHECK-NEXT:    sub z1.s, z1.s, #1 // =0x1
+; CHECK-NEXT:    sub z0.s, z0.s, #1 // =0x1
 ; CHECK-NEXT:    subr z2.s, z2.s, #0 // =0x0
-; CHECK-NEXT:    and z1.d, z1.d, z4.d
-; CHECK-NEXT:    and z3.d, z3.d, z7.d
-; CHECK-NEXT:    and z0.d, z0.d, z6.d
-; CHECK-NEXT:    and z2.d, z2.d, z5.d
-; CHECK-NEXT:    orr z1.d, z3.d, z1.d
-; CHECK-NEXT:    orr z0.d, z0.d, z2.d
+; CHECK-NEXT:    subr z3.s, z3.s, #0 // =0x0
+; CHECK-NEXT:    and z2.d, z2.d, z4.d
+; CHECK-NEXT:    and z3.d, z3.d, z5.d
+; CHECK-NEXT:    and z4.d, z0.d, z7.d
+; CHECK-NEXT:    and z0.d, z1.d, z6.d
+; CHECK-NEXT:    orr z1.d, z4.d, z2.d
+; CHECK-NEXT:    orr z0.d, z0.d, z3.d
 ; CHECK-NEXT:    // kill: def $q1 killed $q1 killed $z1
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-build-vector.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-build-vector.ll
index 3a6445dd1d99b..d226fc89c3381 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-build-vector.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-build-vector.ll
@@ -69,9 +69,9 @@ define void @build_vector_0_dec3_v8i32(ptr %a) {
 ; CHECK-LABEL: build_vector_0_dec3_v8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    index z0.s, #0, #-3
-; CHECK-NEXT:    mov z1.s, #-12 // =0xfffffffffffffff4
-; CHECK-NEXT:    add z1.s, z0.s, z1.s
-; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    str q0, [x0]
+; CHECK-NEXT:    sub z0.s, z0.s, #12 // =0xc
+; CHECK-NEXT:    str q0, [x0, #16]
 ; CHECK-NEXT:    ret
 ;
 ; NONEON-NOSVE-LABEL: build_vector_0_dec3_v8i32:
@@ -91,11 +91,10 @@ define void @build_vector_minus2_dec32_v4i64(ptr %a) {
 ; CHECK-LABEL: build_vector_minus2_dec32_v4i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov x8, #-32 // =0xffffffffffffffe0
-; CHECK-NEXT:    mov z1.d, #-66 // =0xffffffffffffffbe
-; CHECK-NEXT:    mov z2.d, #-2 // =0xfffffffffffffffe
 ; CHECK-NEXT:    index z0.d, #0, x8
-; CHECK-NEXT:    add z1.d, z0.d, z1.d
-; CHECK-NEXT:    add z0.d, z0.d, z2.d
+; CHECK-NEXT:    mov z1.d, z0.d
+; CHECK-NEXT:    sub z0.d, z0.d, #2 // =0x2
+; CHECK-NEXT:    sub z1.d, z1.d, #66 // =0x42
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
 ;


        

