[llvm] r355485 - [X86] Enable the add with 128 -> sub with -128 encoding trick with X86ISD::ADD when the carry flag isn't used.
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Tue Mar 5 23:36:38 PST 2019
Author: ctopper
Date: Tue Mar 5 23:36:38 2019
New Revision: 355485
URL: http://llvm.org/viewvc/llvm-project?rev=355485&view=rev
Log:
[X86] Enable the add with 128 -> sub with -128 encoding trick with X86ISD::ADD when the carry flag isn't used.
This allows us to use an 8-bit sign-extended immediate instead of a 16- or 32-bit immediate.
Also do the same for 0x80000000 with 64-bit adds to avoid having to use a movabsq to materialize the immediate.
Modified:
llvm/trunk/lib/Target/X86/X86InstrCompiler.td
llvm/trunk/test/CodeGen/X86/add.ll
llvm/trunk/test/CodeGen/X86/xaluo.ll
Modified: llvm/trunk/lib/Target/X86/X86InstrCompiler.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrCompiler.td?rev=355485&r1=355484&r2=355485&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrCompiler.td (original)
+++ llvm/trunk/lib/Target/X86/X86InstrCompiler.td Tue Mar 5 23:36:38 2019
@@ -1491,6 +1491,13 @@ def : Pat<(add GR64:$src1, 128),
def : Pat<(store (add (loadi64 addr:$dst), 128), addr:$dst),
(SUB64mi8 addr:$dst, -128)>;
+def : Pat<(X86add_flag_nocf GR16:$src1, 128),
+ (SUB16ri8 GR16:$src1, -128)>;
+def : Pat<(X86add_flag_nocf GR32:$src1, 128),
+ (SUB32ri8 GR32:$src1, -128)>;
+def : Pat<(X86add_flag_nocf GR64:$src1, 128),
+ (SUB64ri8 GR64:$src1, -128)>;
+
// The same trick applies for 32-bit immediate fields in 64-bit
// instructions.
def : Pat<(add GR64:$src1, 0x0000000080000000),
@@ -1498,6 +1505,9 @@ def : Pat<(add GR64:$src1, 0x00000000800
def : Pat<(store (add (loadi64 addr:$dst), 0x0000000080000000), addr:$dst),
(SUB64mi32 addr:$dst, 0xffffffff80000000)>;
+def : Pat<(X86add_flag_nocf GR64:$src1, 0x0000000080000000),
+ (SUB64ri32 GR64:$src1, 0xffffffff80000000)>;
+
// To avoid needing to materialize an immediate in a register, use a 32-bit and
// with implicit zero-extension instead of a 64-bit and if the immediate has at
// least 32 bits of leading zeros. If in addition the last 32 bits can be
Modified: llvm/trunk/test/CodeGen/X86/add.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/add.ll?rev=355485&r1=355484&r2=355485&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/add.ll (original)
+++ llvm/trunk/test/CodeGen/X86/add.ll Tue Mar 5 23:36:38 2019
@@ -507,3 +507,156 @@ define i32 @add_to_sub(i32 %a, i32 %b) {
%r = add i32 %add, 1
ret i32 %r
}
+
+declare void @bar_i32(i32)
+declare void @bar_i64(i64)
+
+; Make sure we can use sub -128 for add 128 when the flags are used.
+define void @add_i32_128_flag(i32 %x) {
+; X32-LABEL: add_i32_128_flag:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movl $128, %eax
+; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: je .LBB17_2
+; X32-NEXT: # %bb.1: # %if.then
+; X32-NEXT: pushl %eax
+; X32-NEXT: .cfi_adjust_cfa_offset 4
+; X32-NEXT: calll bar_i32
+; X32-NEXT: addl $4, %esp
+; X32-NEXT: .cfi_adjust_cfa_offset -4
+; X32-NEXT: .LBB17_2: # %if.end
+; X32-NEXT: retl
+;
+; X64-LINUX-LABEL: add_i32_128_flag:
+; X64-LINUX: # %bb.0: # %entry
+; X64-LINUX-NEXT: subl $-128, %edi
+; X64-LINUX-NEXT: je .LBB17_1
+; X64-LINUX-NEXT: # %bb.2: # %if.then
+; X64-LINUX-NEXT: jmp bar_i32 # TAILCALL
+; X64-LINUX-NEXT: .LBB17_1: # %if.end
+; X64-LINUX-NEXT: retq
+;
+; X64-WIN32-LABEL: add_i32_128_flag:
+; X64-WIN32: # %bb.0: # %entry
+; X64-WIN32-NEXT: subl $-128, %ecx
+; X64-WIN32-NEXT: je .LBB17_1
+; X64-WIN32-NEXT: # %bb.2: # %if.then
+; X64-WIN32-NEXT: jmp bar_i32 # TAILCALL
+; X64-WIN32-NEXT: .LBB17_1: # %if.end
+; X64-WIN32-NEXT: retq
+entry:
+ %add = add i32 %x, 128
+ %tobool = icmp eq i32 %add, 0
+ br i1 %tobool, label %if.end, label %if.then
+
+if.then:
+ tail call void @bar_i32(i32 %add)
+ br label %if.end
+
+if.end:
+ ret void
+}
+
+; Make sure we can use sub -128 for add 128 when the flags are used.
+define void @add_i64_128_flag(i64 %x) {
+; X32-LABEL: add_i64_128_flag:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl $128, %eax
+; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl $0, %ecx
+; X32-NEXT: movl %eax, %edx
+; X32-NEXT: orl %ecx, %edx
+; X32-NEXT: je .LBB18_2
+; X32-NEXT: # %bb.1: # %if.then
+; X32-NEXT: pushl %ecx
+; X32-NEXT: .cfi_adjust_cfa_offset 4
+; X32-NEXT: pushl %eax
+; X32-NEXT: .cfi_adjust_cfa_offset 4
+; X32-NEXT: calll bar_i64
+; X32-NEXT: addl $8, %esp
+; X32-NEXT: .cfi_adjust_cfa_offset -8
+; X32-NEXT: .LBB18_2: # %if.end
+; X32-NEXT: retl
+;
+; X64-LINUX-LABEL: add_i64_128_flag:
+; X64-LINUX: # %bb.0: # %entry
+; X64-LINUX-NEXT: subq $-128, %rdi
+; X64-LINUX-NEXT: je .LBB18_1
+; X64-LINUX-NEXT: # %bb.2: # %if.then
+; X64-LINUX-NEXT: jmp bar_i64 # TAILCALL
+; X64-LINUX-NEXT: .LBB18_1: # %if.end
+; X64-LINUX-NEXT: retq
+;
+; X64-WIN32-LABEL: add_i64_128_flag:
+; X64-WIN32: # %bb.0: # %entry
+; X64-WIN32-NEXT: subq $-128, %rcx
+; X64-WIN32-NEXT: je .LBB18_1
+; X64-WIN32-NEXT: # %bb.2: # %if.then
+; X64-WIN32-NEXT: jmp bar_i64 # TAILCALL
+; X64-WIN32-NEXT: .LBB18_1: # %if.end
+; X64-WIN32-NEXT: retq
+entry:
+ %add = add i64 %x, 128
+ %tobool = icmp eq i64 %add, 0
+ br i1 %tobool, label %if.end, label %if.then
+
+if.then:
+ tail call void @bar_i64(i64 %add)
+ br label %if.end
+
+if.end:
+ ret void
+}
+
+; Make sure we can use sub -2147483648 for add 2147483648 when the flags are used.
+define void @add_i64_2147483648_flag(i64 %x) {
+; X32-LABEL: add_i64_2147483648_flag:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl $-2147483648, %eax # imm = 0x80000000
+; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl $0, %ecx
+; X32-NEXT: movl %eax, %edx
+; X32-NEXT: orl %ecx, %edx
+; X32-NEXT: je .LBB19_2
+; X32-NEXT: # %bb.1: # %if.then
+; X32-NEXT: pushl %ecx
+; X32-NEXT: .cfi_adjust_cfa_offset 4
+; X32-NEXT: pushl %eax
+; X32-NEXT: .cfi_adjust_cfa_offset 4
+; X32-NEXT: calll bar_i64
+; X32-NEXT: addl $8, %esp
+; X32-NEXT: .cfi_adjust_cfa_offset -8
+; X32-NEXT: .LBB19_2: # %if.end
+; X32-NEXT: retl
+;
+; X64-LINUX-LABEL: add_i64_2147483648_flag:
+; X64-LINUX: # %bb.0: # %entry
+; X64-LINUX-NEXT: subq $-2147483648, %rdi # imm = 0x80000000
+; X64-LINUX-NEXT: je .LBB19_1
+; X64-LINUX-NEXT: # %bb.2: # %if.then
+; X64-LINUX-NEXT: jmp bar_i64 # TAILCALL
+; X64-LINUX-NEXT: .LBB19_1: # %if.end
+; X64-LINUX-NEXT: retq
+;
+; X64-WIN32-LABEL: add_i64_2147483648_flag:
+; X64-WIN32: # %bb.0: # %entry
+; X64-WIN32-NEXT: subq $-2147483648, %rcx # imm = 0x80000000
+; X64-WIN32-NEXT: je .LBB19_1
+; X64-WIN32-NEXT: # %bb.2: # %if.then
+; X64-WIN32-NEXT: jmp bar_i64 # TAILCALL
+; X64-WIN32-NEXT: .LBB19_1: # %if.end
+; X64-WIN32-NEXT: retq
+entry:
+ %add = add i64 %x, 2147483648
+ %tobool = icmp eq i64 %add, 0
+ br i1 %tobool, label %if.end, label %if.then
+
+if.then:
+ tail call void @bar_i64(i64 %add)
+ br label %if.end
+
+if.end:
+ ret void
+}
Modified: llvm/trunk/test/CodeGen/X86/xaluo.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/xaluo.ll?rev=355485&r1=355484&r2=355485&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/xaluo.ll (original)
+++ llvm/trunk/test/CodeGen/X86/xaluo.ll Tue Mar 5 23:36:38 2019
@@ -293,10 +293,9 @@ define zeroext i1 @saddoi64imm4(i64 %v1,
define zeroext i1 @saddoi64imm5(i64 %v1, i64* %res) {
; SDAG-LABEL: saddoi64imm5:
; SDAG: ## %bb.0:
-; SDAG-NEXT: movl $2147483648, %ecx ## imm = 0x80000000
-; SDAG-NEXT: addq %rdi, %rcx
+; SDAG-NEXT: subq $-2147483648, %rdi ## imm = 0x80000000
; SDAG-NEXT: seto %al
-; SDAG-NEXT: movq %rcx, (%rsi)
+; SDAG-NEXT: movq %rdi, (%rsi)
; SDAG-NEXT: retq
;
; FAST-LABEL: saddoi64imm5:
More information about the llvm-commits
mailing list