[llvm] r233653 - [AArch64] Fix poor codegen for add immediate.

Quentin Colombet qcolombet at apple.com
Mon Mar 30 17:31:13 PDT 2015


Author: qcolombet
Date: Mon Mar 30 19:31:13 2015
New Revision: 233653

URL: http://llvm.org/viewvc/llvm-project?rev=233653&view=rev
Log:
[AArch64] Fix poor codegen for add immediate.
We used to match the register variant before the immediate variant when the
register argument could be implicitly zero-extended.
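For illustration, here is the shape of the code before and after this change
(taken from the comments in the test below; register numbers are made up):

  Before: the extended-register pattern won, so the immediate was
  materialized and the extension was folded into the add:
    ldrb  w8, [x0]
    mov   x9, #12
    add   x10, x9, w8, uxtb

  After: the immediate pattern wins; writing w8 already zero-extends
  into x8, so a single add suffices:
    ldrb  w8, [x0]
    add   x10, x8, #12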

Modified:
    llvm/trunk/lib/Target/AArch64/AArch64InstrFormats.td
    llvm/trunk/test/CodeGen/AArch64/addsub.ll

Modified: llvm/trunk/lib/Target/AArch64/AArch64InstrFormats.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64InstrFormats.td?rev=233653&r1=233652&r2=233653&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64InstrFormats.td (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64InstrFormats.td Mon Mar 30 19:31:13 2015
@@ -1637,10 +1637,12 @@ multiclass AddSub<bit isSub, string mnem
                   SDPatternOperator OpNode = null_frag> {
   let hasSideEffects = 0, isReMaterializable = 1, isAsCheapAsAMove = 1 in {
   // Add/Subtract immediate
+  let AddedComplexity = 6 in
   def Wri  : BaseAddSubImm<isSub, 0, GPR32sp, GPR32sp, addsub_shifted_imm32,
                            mnemonic, OpNode> {
     let Inst{31} = 0;
   }
+  let AddedComplexity = 6 in
   def Xri  : BaseAddSubImm<isSub, 0, GPR64sp, GPR64sp, addsub_shifted_imm64,
                            mnemonic, OpNode> {
     let Inst{31} = 1;

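For context: AddedComplexity is a TableGen knob that raises a pattern's
complexity score in the generated SelectionDAG matcher, so patterns with a
higher value are tried before otherwise-matching patterns of equal or lower
complexity. Bumping the Wri/Xri immediate forms to 6 makes them win over the
extended-register forms that previously matched first; the value 6 is
presumably chosen to be just high enough to outrank those competing patterns.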
Modified: llvm/trunk/test/CodeGen/AArch64/addsub.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/addsub.ll?rev=233653&r1=233652&r2=233653&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/addsub.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/addsub.ll Mon Mar 30 19:31:13 2015
@@ -24,6 +24,34 @@ define void @add_small() {
   ret void
 }
 
+; Make sure we grab the imm variant when the register operand
+; can be implicitly zero-extended.
+; We used to generate something horrible like this:
+; wA = ldrb
+; xB = ldimm 12
+; xC = add xB, wA, uxtb
+; whereas this can be achieved with:
+; wA = ldrb
+; xC = add xA, #12 ; <- xA implicitly zero-extends wA.
+define void @add_small_imm(i8* %p, i64* %q, i32 %b, i32* %addr) {
+; CHECK-LABEL: add_small_imm:
+entry:
+
+; CHECK: ldrb w[[LOAD32:[0-9]+]], [x0]
+  %t = load i8, i8* %p
+  %promoted = zext i8 %t to i64
+  %zextt = zext i8 %t to i32
+  %add = add nuw i32 %zextt, %b
+
+; CHECK: add [[ADD2:x[0-9]+]], x[[LOAD32]], #12
+  %add2 = add nuw i64 %promoted, 12
+  store i32 %add, i32* %addr
+
+; CHECK: str [[ADD2]], [x1]
+  store i64 %add2, i64* %q
+  ret void
+}
+
 ; Add 12-bit immediates, shifted left by 12 bits
 define void @add_med() {
 ; CHECK-LABEL: add_med:
