[llvm] r367700 - [AArch64][GlobalISel] Support the neg_addsub_shifted_imm32 pattern

Jessica Paquette via llvm-commits llvm-commits at lists.llvm.org
Fri Aug 2 11:12:53 PDT 2019


Author: paquette
Date: Fri Aug  2 11:12:53 2019
New Revision: 367700

URL: http://llvm.org/viewvc/llvm-project?rev=367700&view=rev
Log:
[AArch64][GlobalISel] Support the neg_addsub_shifted_imm32 pattern

Add an equivalent ComplexRendererFns function for SelectNegArithImmed. This
allows us to select immediate adds of -1 by turning them into subtracts.

Update select-binop.mir to show that the pattern works.

Differential Revision: https://reviews.llvm.org/D65460

Modified:
    llvm/trunk/lib/Target/AArch64/AArch64InstrFormats.td
    llvm/trunk/lib/Target/AArch64/AArch64InstructionSelector.cpp
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-binop.mir

Modified: llvm/trunk/lib/Target/AArch64/AArch64InstrFormats.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64InstrFormats.td?rev=367700&r1=367699&r2=367700&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64InstrFormats.td (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64InstrFormats.td Fri Aug  2 11:12:53 2019
@@ -885,6 +885,14 @@ class neg_addsub_shifted_imm<ValueType T
 def neg_addsub_shifted_imm32 : neg_addsub_shifted_imm<i32>;
 def neg_addsub_shifted_imm64 : neg_addsub_shifted_imm<i64>;
 
+def gi_neg_addsub_shifted_imm32 :
+    GIComplexOperandMatcher<s32, "selectNegArithImmed">,
+    GIComplexPatternEquiv<neg_addsub_shifted_imm32>;
+
+def gi_neg_addsub_shifted_imm64 :
+    GIComplexOperandMatcher<s64, "selectNegArithImmed">,
+    GIComplexPatternEquiv<neg_addsub_shifted_imm64>;
+
 // An extend operand:
 //  {5-3} - extend type
 //  {2-0} - imm3

Modified: llvm/trunk/lib/Target/AArch64/AArch64InstructionSelector.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64InstructionSelector.cpp?rev=367700&r1=367699&r2=367700&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64InstructionSelector.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64InstructionSelector.cpp Fri Aug  2 11:12:53 2019
@@ -162,7 +162,9 @@ private:
   ComplexRendererFns selectShiftA_64(const MachineOperand &Root) const;
   ComplexRendererFns selectShiftB_64(const MachineOperand &Root) const;
 
+  ComplexRendererFns select12BitValueWithLeftShift(uint64_t Immed) const;
   ComplexRendererFns selectArithImmed(MachineOperand &Root) const;
+  ComplexRendererFns selectNegArithImmed(MachineOperand &Root) const;
 
   ComplexRendererFns selectAddrModeUnscaled(MachineOperand &Root,
                                             unsigned Size) const;
@@ -4081,6 +4083,30 @@ AArch64InstructionSelector::selectShiftB
   return {{[=](MachineInstrBuilder &MIB) { MIB.addImm(Enc); }}};
 }
 
+/// Helper to select an immediate value that can be represented as a 12-bit
+/// value shifted left by either 0 or 12. If it is possible to do so, return
+/// the immediate and shift value. If not, return None.
+///
+/// Used by selectArithImmed and selectNegArithImmed.
+InstructionSelector::ComplexRendererFns
+AArch64InstructionSelector::select12BitValueWithLeftShift(
+    uint64_t Immed) const {
+  unsigned ShiftAmt;
+  if (Immed >> 12 == 0) {
+    ShiftAmt = 0;
+  } else if ((Immed & 0xfff) == 0 && Immed >> 24 == 0) {
+    ShiftAmt = 12;
+    Immed = Immed >> 12;
+  } else
+    return None;
+
+  unsigned ShVal = AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt);
+  return {{
+      [=](MachineInstrBuilder &MIB) { MIB.addImm(Immed); },
+      [=](MachineInstrBuilder &MIB) { MIB.addImm(ShVal); },
+  }};
+}
+
 /// SelectArithImmed - Select an immediate value that can be represented as
 /// a 12-bit value shifted left by either 0 or 12.  If so, return true with
 /// Val set to the 12-bit value and Shift set to the shifter operand.
@@ -4094,22 +4120,41 @@ AArch64InstructionSelector::selectArithI
   auto MaybeImmed = getImmedFromMO(Root);
   if (MaybeImmed == None)
     return None;
+  return select12BitValueWithLeftShift(*MaybeImmed);
+}
+
+/// SelectNegArithImmed - As above, but negates the value before trying to
+/// select it.
+InstructionSelector::ComplexRendererFns
+AArch64InstructionSelector::selectNegArithImmed(MachineOperand &Root) const {
+  // We need a register here, because we need to know if we have a 64 or 32
+  // bit immediate.
+  if (!Root.isReg())
+    return None;
+  auto MaybeImmed = getImmedFromMO(Root);
+  if (MaybeImmed == None)
+    return None;
   uint64_t Immed = *MaybeImmed;
-  unsigned ShiftAmt;
 
-  if (Immed >> 12 == 0) {
-    ShiftAmt = 0;
-  } else if ((Immed & 0xfff) == 0 && Immed >> 24 == 0) {
-    ShiftAmt = 12;
-    Immed = Immed >> 12;
-  } else
+  // This negation is almost always valid, but "cmp wN, #0" and "cmn wN, #0"
+  // have the opposite effect on the C flag, so this pattern mustn't match under
+  // those circumstances.
+  if (Immed == 0)
     return None;
 
-  unsigned ShVal = AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt);
-  return {{
-      [=](MachineInstrBuilder &MIB) { MIB.addImm(Immed); },
-      [=](MachineInstrBuilder &MIB) { MIB.addImm(ShVal); },
-  }};
+  // Check if we're dealing with a 32-bit type on the root or a 64-bit type on
+  // the root.
+  MachineRegisterInfo &MRI = Root.getParent()->getMF()->getRegInfo();
+  if (MRI.getType(Root.getReg()).getSizeInBits() == 32)
+    Immed = ~((uint32_t)Immed) + 1;
+  else
+    Immed = ~Immed + 1ULL;
+
+  if (Immed & 0xFFFFFFFFFF000000ULL)
+    return None;
+
+  Immed &= 0xFFFFFFULL;
+  return select12BitValueWithLeftShift(Immed);
 }
 
 /// Return true if it is worth folding MI into an extended register. That is,

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-binop.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-binop.mir?rev=367700&r1=367699&r2=367700&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-binop.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-binop.mir Fri Aug  2 11:12:53 2019
@@ -10,6 +10,13 @@
   define void @add_imm_s32_gpr() { ret void }
   define void @add_imm_s64_gpr() { ret void }
 
+  define void @add_neg_s32_gpr() { ret void }
+  define void @add_neg_s64_gpr() { ret void }
+  define void @add_neg_invalid_immed_s32() { ret void }
+  define void @add_neg_invalid_immed_s64() { ret void }
+  define void @add_imm_0_s32() { ret void }
+  define void @add_imm_0_s64() { ret void }
+
   define void @add_imm_s32_gpr_bb() { ret void }
 
   define void @sub_s32_gpr() { ret void }
@@ -159,6 +166,154 @@ body:             |
     %2(s64) = G_ADD %0, %1
     $x0 = COPY %2(s64)
 ...
+
+---
+name:            add_neg_s32_gpr
+legalized:       true
+regBankSelected: true
+
+registers:
+  - { id: 0, class: gpr }
+  - { id: 1, class: gpr }
+  - { id: 2, class: gpr }
+
+body:             |
+  bb.0:
+    liveins: $w1, $w2
+    ; We should be able to turn the ADD into a SUB.
+    ; CHECK-LABEL: name: add_neg_s32_gpr
+    ; CHECK: [[COPY:%[0-9]+]]:gpr32sp = COPY $w1
+    ; CHECK: [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri [[COPY]], 1, 0, implicit-def $nzcv
+    ; CHECK: $w2 = COPY [[SUBSWri]]
+    %0(s32) = COPY $w1
+    %1(s32) = G_CONSTANT i32 -1
+    %2(s32) = G_ADD %0, %1
+    $w2 = COPY %2(s32)
+...
+
+---
+name:            add_neg_s64_gpr
+legalized:       true
+regBankSelected: true
+
+registers:
+  - { id: 0, class: gpr }
+  - { id: 1, class: gpr }
+  - { id: 2, class: gpr }
+
+body:             |
+  bb.0:
+    liveins: $x0, $x1
+    ; We should be able to turn the ADD into a SUB.
+    ; CHECK-LABEL: name: add_neg_s64_gpr
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: [[SUBSXri:%[0-9]+]]:gpr64 = SUBSXri [[COPY]], 1, 0, implicit-def $nzcv
+    ; CHECK: $x0 = COPY [[SUBSXri]]
+    %0(s64) = COPY $x0
+    %1(s64) = G_CONSTANT i64 -1
+    %2(s64) = G_ADD %0, %1
+    $x0 = COPY %2(s64)
+...
+
+---
+name:            add_neg_invalid_immed_s32
+legalized:       true
+regBankSelected: true
+
+registers:
+  - { id: 0, class: gpr }
+  - { id: 1, class: gpr }
+  - { id: 2, class: gpr }
+
+body:             |
+  bb.0:
+    liveins: $x0, $x1
+    ; We can't select this if the value is out of range.
+    ; CHECK-LABEL: name: add_neg_invalid_immed_s32
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+    ; CHECK: [[MOVi64imm:%[0-9]+]]:gpr64 = MOVi64imm -5000
+    ; CHECK: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[COPY]], [[MOVi64imm]]
+    ; CHECK: $x0 = COPY [[ADDXrr]]
+    %0(s64) = COPY $x0
+    %1(s64) = G_CONSTANT i64 -5000
+    %2(s64) = G_ADD %0, %1
+    $x0 = COPY %2(s64)
+...
+
+---
+name:            add_neg_invalid_immed_s64
+legalized:       true
+regBankSelected: true
+
+registers:
+  - { id: 0, class: gpr }
+  - { id: 1, class: gpr }
+  - { id: 2, class: gpr }
+
+body:             |
+  bb.0:
+    liveins: $x0, $x1
+    ; We can't select this if the value is out of range.
+    ; CHECK-LABEL: name: add_neg_invalid_immed_s64
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+    ; CHECK: [[MOVi64imm:%[0-9]+]]:gpr64 = MOVi64imm -5000
+    ; CHECK: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[COPY]], [[MOVi64imm]]
+    ; CHECK: $x0 = COPY [[ADDXrr]]
+    %0(s64) = COPY $x0
+    %1(s64) = G_CONSTANT i64 -5000
+    %2(s64) = G_ADD %0, %1
+    $x0 = COPY %2(s64)
+...
+
+---
+name:            add_imm_0_s32
+legalized:       true
+regBankSelected: true
+
+registers:
+  - { id: 0, class: gpr }
+  - { id: 1, class: gpr }
+  - { id: 2, class: gpr }
+
+body:             |
+  bb.0:
+    liveins: $x0, $x1
+    ; We shouldn't get a SUB here, because "cmp wN, #0" and "cmn wN, #0" have
+    ; opposite effects on the C flag.
+    ; CHECK-LABEL: name: add_imm_0_s32
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: [[ADDXri:%[0-9]+]]:gpr64sp = ADDXri [[COPY]], 0, 0
+    ; CHECK: $x0 = COPY [[ADDXri]]
+    %0(s64) = COPY $x0
+    %1(s64) = G_CONSTANT i64 0
+    %2(s64) = G_ADD %0, %1
+    $x0 = COPY %2(s64)
+...
+
+---
+name:            add_imm_0_s64
+legalized:       true
+regBankSelected: true
+
+registers:
+  - { id: 0, class: gpr }
+  - { id: 1, class: gpr }
+  - { id: 2, class: gpr }
+
+body:             |
+  bb.0:
+    liveins: $x0, $x1
+    ; We shouldn't get a SUB here, because "cmp xN, #0" and "cmn xN, #0" have
+    ; opposite effects on the C flag.
+    ; CHECK-LABEL: name: add_imm_0_s64
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: [[ADDXri:%[0-9]+]]:gpr64sp = ADDXri [[COPY]], 0, 0
+    ; CHECK: $x0 = COPY [[ADDXri]]
+    %0(s64) = COPY $x0
+    %1(s64) = G_CONSTANT i64 0
+    %2(s64) = G_ADD %0, %1
+    $x0 = COPY %2(s64)
+...
 
 ---
 name:            add_imm_s32_gpr_bb




More information about the llvm-commits mailing list