[llvm] 9b7a0a5 - [X86] Add support for reusing ZF etc. from locked XADD instructions (PR20841)

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Tue Apr 27 07:03:32 PDT 2021


Author: Simon Pilgrim
Date: 2021-04-27T15:01:13+01:00
New Revision: 9b7a0a50355d5dc8ab3e9598103ef81b00077ff4

URL: https://github.com/llvm/llvm-project/commit/9b7a0a50355d5dc8ab3e9598103ef81b00077ff4
DIFF: https://github.com/llvm/llvm-project/commit/9b7a0a50355d5dc8ab3e9598103ef81b00077ff4.diff

LOG: [X86] Add support for reusing ZF etc. from locked XADD instructions (PR20841)

XADD has the same EFLAGS behaviour as ADD, so a compare-with-zero that follows it can reuse the flags it sets instead of emitting a separate TEST instruction.

Reapplies rG2149aa73f640 (after it was reverted at rG535df472b042). AFAICT rG029e41ec9800 should ensure we correctly tag the LXADD* ops as load/stores; I haven't been able to reproduce the sanitizer buildbot failures locally, so this is a speculative commit.
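
For illustration, here is a minimal C++ sketch (mine, not part of the commit) of the shape of code this affects: keeping the returned old value live forces the backend to select LOCK XADD rather than a plain LOCK ADD/INC, and the compare-with-zero that follows can now reuse the EFLAGS the XADD already set:

#include <atomic>

// Hypothetical example mirroring the xadd_cmp0_* tests below: fetch_add
// returns the previous value, and the == 0 compare is what the removed
// testq/testl/testw/testb instructions used to implement.
bool was_zero(std::atomic<long> &counter) {
  return counter.fetch_add(1, std::memory_order_seq_cst) == 0;
}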

Added: 
    

Modified: 
    llvm/lib/Target/X86/X86InstrInfo.cpp
    llvm/test/CodeGen/X86/atomic-eflags-reuse.ll
    llvm/test/CodeGen/X86/atomic-flags.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index 981c735c4239..beb343d06dfc 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -4020,10 +4020,12 @@ inline static bool isDefConvertible(const MachineInstr &MI, bool &NoSignFlag,
   case X86::SBB8ri:    case X86::SBB64rr:  case X86::SBB32rr:
   case X86::SBB16rr:   case X86::SBB8rr:   case X86::SBB64rm:
   case X86::SBB32rm:   case X86::SBB16rm:  case X86::SBB8rm:
-  case X86::NEG8r:     case X86::NEG16r:   case X86::NEG32r: case X86::NEG64r:
-  case X86::SAR8r1:    case X86::SAR16r1:  case X86::SAR32r1:case X86::SAR64r1:
-  case X86::SHR8r1:    case X86::SHR16r1:  case X86::SHR32r1:case X86::SHR64r1:
-  case X86::SHL8r1:    case X86::SHL16r1:  case X86::SHL32r1:case X86::SHL64r1:
+  case X86::NEG8r:     case X86::NEG16r:   case X86::NEG32r:  case X86::NEG64r:
+  case X86::LXADD64:   case X86::LXADD32:  case X86::LXADD16: case X86::LXADD8:
+  // TODO: Add additional LOCK/XADD instructions when we have test coverage.
+  case X86::SAR8r1:    case X86::SAR16r1:  case X86::SAR32r1: case X86::SAR64r1:
+  case X86::SHR8r1:    case X86::SHR16r1:  case X86::SHR32r1: case X86::SHR64r1:
+  case X86::SHL8r1:    case X86::SHL16r1:  case X86::SHL32r1: case X86::SHL64r1:
   case X86::LZCNT16rr: case X86::LZCNT16rm:
   case X86::LZCNT32rr: case X86::LZCNT32rm:
   case X86::LZCNT64rr: case X86::LZCNT64rm:

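As a toy model (hand-written, not LLVM code) of the contract isDefConvertible expresses: an opcode qualifies when its EFLAGS output can stand in for a compare of its defined value against zero, which is what allows the peephole in optimizeCompareInstr to erase the following TEST:

#include <cstdio>

// Toy stand-ins for the real X86 opcode enum; LXADD32 is the locked XADD.
enum Opcode { ADD32rr, NEG32r, LXADD32, MOV32rr };

// Sketch of the predicate's intent: arithmetic ops (now including the
// LXADD* family) define EFLAGS like a compare with zero; plain register
// moves leave EFLAGS untouched, so a TEST after them must stay.
static bool isDefConvertible(Opcode Op) {
  switch (Op) {
  case ADD32rr:
  case NEG32r:
  case LXADD32:
    return true;
  case MOV32rr:
    return false;
  }
  return false;
}

int main() {
  std::printf("LXADD32 convertible: %d\n", isDefConvertible(LXADD32)); // 1
  return 0;
}
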
diff --git a/llvm/test/CodeGen/X86/atomic-eflags-reuse.ll b/llvm/test/CodeGen/X86/atomic-eflags-reuse.ll
index b5a27892ad2f..200f55bd922a 100644
--- a/llvm/test/CodeGen/X86/atomic-eflags-reuse.ll
+++ b/llvm/test/CodeGen/X86/atomic-eflags-reuse.ll
@@ -228,7 +228,6 @@ define i8 @test_add_1_cmov_cmov(i64* %p, i8* %q) #0 {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movl $1, %eax
 ; CHECK-NEXT:    lock xaddq %rax, (%rdi)
-; CHECK-NEXT:    testq %rax, %rax
 ; CHECK-NEXT:    movl $12, %eax
 ; CHECK-NEXT:    movl $34, %ecx
 ; CHECK-NEXT:    cmovsl %eax, %ecx

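A hypothetical C++ analogue (mine, not from the test suite) of the test_add_1_cmov_cmov pattern above: the sign check after the increment lowers to a CMOVS, and with this patch the flags it reads come straight from the LOCK XADD rather than from a separate TEST:

#include <atomic>

// Hypothetical source producing a lock xadd followed by a cmov on the sign
// flag, similar in shape to the updated CHECK lines above.
int pick_by_sign(std::atomic<long> &counter) {
  long old = counter.fetch_add(1, std::memory_order_seq_cst);
  return old < 0 ? 12 : 34;
}
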
diff --git a/llvm/test/CodeGen/X86/atomic-flags.ll b/llvm/test/CodeGen/X86/atomic-flags.ll
index dfd916f81216..42cf28751b59 100644
--- a/llvm/test/CodeGen/X86/atomic-flags.ll
+++ b/llvm/test/CodeGen/X86/atomic-flags.ll
@@ -131,7 +131,6 @@ define zeroext i1 @xadd_cmp0_i64(i64* %x) nounwind {
 ; X64:       # %bb.0:
 ; X64-NEXT:    movl $1, %eax
 ; X64-NEXT:    lock xaddq %rax, (%rdi)
-; X64-NEXT:    testq %rax, %rax
 ; X64-NEXT:    sete %al
 ; X64-NEXT:    retq
 ;
@@ -167,7 +166,6 @@ define zeroext i1 @xadd_cmp0_i32(i32* %x) nounwind {
 ; X64:       # %bb.0:
 ; X64-NEXT:    movl $1, %eax
 ; X64-NEXT:    lock xaddl %eax, (%rdi)
-; X64-NEXT:    testl %eax, %eax
 ; X64-NEXT:    setne %al
 ; X64-NEXT:    retq
 ;
@@ -176,7 +174,6 @@ define zeroext i1 @xadd_cmp0_i32(i32* %x) nounwind {
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl $1, %ecx
 ; X86-NEXT:    lock xaddl %ecx, (%eax)
-; X86-NEXT:    testl %ecx, %ecx
 ; X86-NEXT:    setne %al
 ; X86-NEXT:    retl
   %add = atomicrmw add i32* %x, i32 1 seq_cst
@@ -189,7 +186,6 @@ define zeroext i1 @xadd_cmp0_i16(i16* %x) nounwind {
 ; X64:       # %bb.0:
 ; X64-NEXT:    movw $1, %ax
 ; X64-NEXT:    lock xaddw %ax, (%rdi)
-; X64-NEXT:    testw %ax, %ax
 ; X64-NEXT:    sete %al
 ; X64-NEXT:    retq
 ;
@@ -198,7 +194,6 @@ define zeroext i1 @xadd_cmp0_i16(i16* %x) nounwind {
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movw $1, %cx
 ; X86-NEXT:    lock xaddw %cx, (%eax)
-; X86-NEXT:    testw %cx, %cx
 ; X86-NEXT:    sete %al
 ; X86-NEXT:    retl
   %add = atomicrmw add i16* %x, i16 1 seq_cst
@@ -211,7 +206,6 @@ define zeroext i1 @xadd_cmp0_i8(i8* %x) nounwind {
 ; X64:       # %bb.0:
 ; X64-NEXT:    movb $1, %al
 ; X64-NEXT:    lock xaddb %al, (%rdi)
-; X64-NEXT:    testb %al, %al
 ; X64-NEXT:    setne %al
 ; X64-NEXT:    retq
 ;
@@ -220,7 +214,6 @@ define zeroext i1 @xadd_cmp0_i8(i8* %x) nounwind {
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movb $1, %cl
 ; X86-NEXT:    lock xaddb %cl, (%eax)
-; X86-NEXT:    testb %cl, %cl
 ; X86-NEXT:    setne %al
 ; X86-NEXT:    retl
   %add = atomicrmw add i8* %x, i8 1 seq_cst

