[llvm] r339041 - [X86] When using "and $0" and "orl $-1" to store 0 and -1 for minsize, make sure the store isn't volatile

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Mon Aug 6 11:44:26 PDT 2018


Author: ctopper
Date: Mon Aug  6 11:44:26 2018
New Revision: 339041

URL: http://llvm.org/viewvc/llvm-project?rev=339041&view=rev
Log:
[X86] When using "and $0" and "orl $-1" to store 0 and -1 for minsize, make sure the store isn't volatile

If the store is volatile, it might be a memory-mapped I/O access. In that case we shouldn't generate a load that didn't exist in the source.

Differential Revision: https://reviews.llvm.org/D50270
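
For reference, a minimal LLVM IR sketch of the behavior being fixed (the function names here are hypothetical; the checked-in tests below exercise the same cases). The non-volatile store may still be shrunk to an AND at minsize, while the volatile store must remain a plain MOV so no read of the location is introduced:

; Hypothetical minsize functions mirroring the tests in this patch.
define void @plain_zero(i32* %p) minsize {
entry:
  ; Non-volatile: may lower to "andl $0, (%rdi)" to save encoding bytes.
  store i32 0, i32* %p
  ret void
}

define void @mmio_zero(i32* %p) minsize {
entry:
  ; Volatile (e.g. MMIO): must lower to "movl $0, (%rdi)" with no
  ; read-modify-write of the location.
  store volatile i32 0, i32* %p
  ret void
}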

Modified:
    llvm/trunk/lib/Target/X86/X86InstrCompiler.td
    llvm/trunk/test/CodeGen/X86/store-zero-and-minus-one.ll
    llvm/trunk/test/CodeGen/X86/tail-opts.ll

Modified: llvm/trunk/lib/Target/X86/X86InstrCompiler.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrCompiler.td?rev=339041&r1=339040&r2=339041&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrCompiler.td (original)
+++ llvm/trunk/lib/Target/X86/X86InstrCompiler.td Mon Aug  6 11:44:26 2018
@@ -1006,16 +1006,22 @@ def : Pat<(i64 (atomic_load_64 addr:$src
 // DAG Pattern Matching Rules
 //===----------------------------------------------------------------------===//
 
+def nonvolatile_store : PatFrag<(ops node:$val, node:$ptr),
+                                 (store node:$val, node:$ptr), [{
+  return !cast<StoreSDNode>(N)->isVolatile();
+}]>;
+
+
 // Use AND/OR to store 0/-1 in memory when optimizing for minsize. This saves
 // binary size compared to a regular MOV, but it introduces an unnecessary
 // load, so is not suitable for regular or optsize functions.
 let Predicates = [OptForMinSize] in {
-def : Pat<(store (i16 0), addr:$dst), (AND16mi8 addr:$dst, 0)>;
-def : Pat<(store (i32 0), addr:$dst), (AND32mi8 addr:$dst, 0)>;
-def : Pat<(store (i64 0), addr:$dst), (AND64mi8 addr:$dst, 0)>;
-def : Pat<(store (i16 -1), addr:$dst), (OR16mi8 addr:$dst, -1)>;
-def : Pat<(store (i32 -1), addr:$dst), (OR32mi8 addr:$dst, -1)>;
-def : Pat<(store (i64 -1), addr:$dst), (OR64mi8 addr:$dst, -1)>;
+def : Pat<(nonvolatile_store (i16 0), addr:$dst), (AND16mi8 addr:$dst, 0)>;
+def : Pat<(nonvolatile_store (i32 0), addr:$dst), (AND32mi8 addr:$dst, 0)>;
+def : Pat<(nonvolatile_store (i64 0), addr:$dst), (AND64mi8 addr:$dst, 0)>;
+def : Pat<(nonvolatile_store (i16 -1), addr:$dst), (OR16mi8 addr:$dst, -1)>;
+def : Pat<(nonvolatile_store (i32 -1), addr:$dst), (OR32mi8 addr:$dst, -1)>;
+def : Pat<(nonvolatile_store (i64 -1), addr:$dst), (OR64mi8 addr:$dst, -1)>;
 }
 
 // In kernel code model, we can get the address of a label

Modified: llvm/trunk/test/CodeGen/X86/store-zero-and-minus-one.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/store-zero-and-minus-one.ll?rev=339041&r1=339040&r2=339041&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/store-zero-and-minus-one.ll (original)
+++ llvm/trunk/test/CodeGen/X86/store-zero-and-minus-one.ll Mon Aug  6 11:44:26 2018
@@ -147,13 +147,14 @@ define void @volatile_zero_64(i64* %p) m
 ; CHECK32-LABEL: volatile_zero_64:
 ; CHECK32:       # %bb.0: # %entry
 ; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; CHECK32-NEXT:    andl $0, 4(%eax)
-; CHECK32-NEXT:    andl $0, (%eax)
+; CHECK32-NEXT:    xorl %ecx, %ecx
+; CHECK32-NEXT:    movl %ecx, 4(%eax)
+; CHECK32-NEXT:    movl %ecx, (%eax)
 ; CHECK32-NEXT:    retl
 ;
 ; CHECK64-LABEL: volatile_zero_64:
 ; CHECK64:       # %bb.0: # %entry
-; CHECK64-NEXT:    andq $0, (%rdi)
+; CHECK64-NEXT:    movq $0, (%rdi)
 ; CHECK64-NEXT:    retq
 entry:
   store volatile i64 0, i64* %p
@@ -164,12 +165,12 @@ define void @volatile_zero_32(i32* %p) m
 ; CHECK32-LABEL: volatile_zero_32:
 ; CHECK32:       # %bb.0: # %entry
 ; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; CHECK32-NEXT:    andl $0, (%eax)
+; CHECK32-NEXT:    movl $0, (%eax)
 ; CHECK32-NEXT:    retl
 ;
 ; CHECK64-LABEL: volatile_zero_32:
 ; CHECK64:       # %bb.0: # %entry
-; CHECK64-NEXT:    andl $0, (%rdi)
+; CHECK64-NEXT:    movl $0, (%rdi)
 ; CHECK64-NEXT:    retq
 entry:
   store volatile i32 0, i32* %p
@@ -180,12 +181,12 @@ define void @volatile_zero_16(i16* %p) m
 ; CHECK32-LABEL: volatile_zero_16:
 ; CHECK32:       # %bb.0: # %entry
 ; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; CHECK32-NEXT:    andw $0, (%eax)
+; CHECK32-NEXT:    movw $0, (%eax)
 ; CHECK32-NEXT:    retl
 ;
 ; CHECK64-LABEL: volatile_zero_16:
 ; CHECK64:       # %bb.0: # %entry
-; CHECK64-NEXT:    andw $0, (%rdi)
+; CHECK64-NEXT:    movw $0, (%rdi)
 ; CHECK64-NEXT:    retq
 entry:
   store volatile i16 0, i16* %p
@@ -197,13 +198,15 @@ define void @volatile_minus_one_64(i64*
 ; CHECK32-LABEL: volatile_minus_one_64:
 ; CHECK32:       # %bb.0: # %entry
 ; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; CHECK32-NEXT:    orl $-1, 4(%eax)
-; CHECK32-NEXT:    orl $-1, (%eax)
+; CHECK32-NEXT:    xorl %ecx, %ecx
+; CHECK32-NEXT:    decl %ecx
+; CHECK32-NEXT:    movl %ecx, 4(%eax)
+; CHECK32-NEXT:    movl %ecx, (%eax)
 ; CHECK32-NEXT:    retl
 ;
 ; CHECK64-LABEL: volatile_minus_one_64:
 ; CHECK64:       # %bb.0: # %entry
-; CHECK64-NEXT:    orq $-1, (%rdi)
+; CHECK64-NEXT:    movq $-1, (%rdi)
 ; CHECK64-NEXT:    retq
 entry:
   store volatile i64 -1, i64* %p
@@ -214,12 +217,12 @@ define void @volatile_minus_one_32(i32*
 ; CHECK32-LABEL: volatile_minus_one_32:
 ; CHECK32:       # %bb.0: # %entry
 ; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; CHECK32-NEXT:    orl $-1, (%eax)
+; CHECK32-NEXT:    movl $-1, (%eax)
 ; CHECK32-NEXT:    retl
 ;
 ; CHECK64-LABEL: volatile_minus_one_32:
 ; CHECK64:       # %bb.0: # %entry
-; CHECK64-NEXT:    orl $-1, (%rdi)
+; CHECK64-NEXT:    movl $-1, (%rdi)
 ; CHECK64-NEXT:    retq
 entry:
   store volatile i32 -1, i32* %p
@@ -230,12 +233,12 @@ define void @volatile_minus_one_16(i16*
 ; CHECK32-LABEL: volatile_minus_one_16:
 ; CHECK32:       # %bb.0: # %entry
 ; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; CHECK32-NEXT:    orw $-1, (%eax)
+; CHECK32-NEXT:    movw $-1, (%eax)
 ; CHECK32-NEXT:    retl
 ;
 ; CHECK64-LABEL: volatile_minus_one_16:
 ; CHECK64:       # %bb.0: # %entry
-; CHECK64-NEXT:    orw $-1, (%rdi)
+; CHECK64-NEXT:    movw $-1, (%rdi)
 ; CHECK64-NEXT:    retq
 entry:
   store volatile i16 -1, i16* %p

Modified: llvm/trunk/test/CodeGen/X86/tail-opts.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/tail-opts.ll?rev=339041&r1=339040&r2=339041&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/tail-opts.ll (original)
+++ llvm/trunk/test/CodeGen/X86/tail-opts.ll Mon Aug  6 11:44:26 2018
@@ -377,7 +377,7 @@ return:
 ; CHECK-LABEL: two_minsize:
 ; CHECK-NOT: XYZ
 ; CHECK: ret
-; CHECK: andl $0, XYZ(%rip)
+; CHECK: movl $0, XYZ(%rip)
 ; CHECK: movl $1, XYZ(%rip)
 ; CHECK-NOT: XYZ
 