[llvm] r360747 - [X86] Use OR32mi8Locked instead of LOCK_OR32mi8 in emitLockedStackOp.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Tue May 14 21:15:46 PDT 2019


Author: ctopper
Date: Tue May 14 21:15:46 2019
New Revision: 360747

URL: http://llvm.org/viewvc/llvm-project?rev=360747&view=rev
Log:
[X86] Use OR32mi8Locked instead of LOCK_OR32mi8 in emitLockedStackOp.

They encode the same way, but OR32mi8Locked has hasUnmodeledSideEffects set,
which should be stronger than the mayLoad/mayStore on LOCK_OR32mi8. I think
this makes sense since we are using the instruction as a fence.

This also seems to hide the operation from the speculative load hardening pass,
so I've reverted r360511.
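
(For context, not part of this commit: the flag difference the log message
leans on can be read straight off the MCInstrDesc of each opcode. The sketch
below is illustrative only and assumes it is built inside the X86 backend,
where the backend-private X86InstrInfo.h header and the X86:: opcode enum are
visible, and that TII is the subtarget's TargetInstrInfo.)

#include "X86InstrInfo.h"                  // backend-private: X86:: opcode enum
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/MC/MCInstrDesc.h"

// Returns true when the fence opcode is opaque to later passes:
// OR32mi8Locked carries hasUnmodeledSideEffects, a stronger property than
// the plain mayLoad/mayStore flags on LOCK_OR32mi8, so passes such as
// speculative load hardening leave the lock-or fence alone.
static bool fenceOpcodeIsOpaque(const llvm::TargetInstrInfo &TII) {
  const llvm::MCInstrDesc &Locked = TII.get(llvm::X86::OR32mi8Locked);
  return Locked.hasUnmodeledSideEffects();
}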

Modified:
    llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
    llvm/trunk/lib/Target/X86/X86SpeculativeLoadHardening.cpp
    llvm/trunk/test/CodeGen/X86/speculative-load-hardening.ll

Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=360747&r1=360746&r2=360747&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Tue May 14 21:15:46 2019
@@ -25872,7 +25872,7 @@ static SDValue emitLockedStackOp(Selecti
       DAG.getRegister(0, MVT::i16),                         // Segment.
       Zero,
       Chain};
-    SDNode *Res = DAG.getMachineNode(X86::LOCK_OR32mi8, DL, MVT::i32,
+    SDNode *Res = DAG.getMachineNode(X86::OR32mi8Locked, DL, MVT::i32,
                                      MVT::Other, Ops);
     return SDValue(Res, 1);
   }
@@ -25887,7 +25887,7 @@ static SDValue emitLockedStackOp(Selecti
     Zero,
     Chain
   };
-  SDNode *Res = DAG.getMachineNode(X86::LOCK_OR32mi8, DL, MVT::i32,
+  SDNode *Res = DAG.getMachineNode(X86::OR32mi8Locked, DL, MVT::i32,
                                    MVT::Other, Ops);
   return SDValue(Res, 1);
 }

Modified: llvm/trunk/lib/Target/X86/X86SpeculativeLoadHardening.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86SpeculativeLoadHardening.cpp?rev=360747&r1=360746&r2=360747&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86SpeculativeLoadHardening.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86SpeculativeLoadHardening.cpp Tue May 14 21:15:46 2019
@@ -1719,11 +1719,9 @@ void X86SpeculativeLoadHardeningPass::tr
 
         // If we have at least one (non-frame-index, non-RIP) register operand,
         // and neither operand is load-dependent, we need to check the load.
-        // Also handle explicit references to RSP as used by idempotent atomic
-        // or with 0.
         unsigned BaseReg = 0, IndexReg = 0;
         if (!BaseMO.isFI() && BaseMO.getReg() != X86::RIP &&
-            BaseMO.getReg() != X86::RSP && BaseMO.getReg() != X86::NoRegister)
+            BaseMO.getReg() != X86::NoRegister)
           BaseReg = BaseMO.getReg();
         if (IndexMO.getReg() != X86::NoRegister)
           IndexReg = IndexMO.getReg();

Modified: llvm/trunk/test/CodeGen/X86/speculative-load-hardening.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/speculative-load-hardening.ll?rev=360747&r1=360746&r2=360747&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/speculative-load-hardening.ll (original)
+++ llvm/trunk/test/CodeGen/X86/speculative-load-hardening.ll Tue May 14 21:15:46 2019
@@ -1148,17 +1148,12 @@ entry:
 define void @idempotent_atomic(i32* %x) speculative_load_hardening {
 ; X64-LABEL: idempotent_atomic:
 ; X64:       # %bb.0:
-; X64-NEXT:    movq %rsp, %rax
-; X64-NEXT:    movq $-1, %rcx
-; X64-NEXT:    sarq $63, %rax
-; X64-NEXT:    lock orl $0, -64(%rsp)
-; X64-NEXT:    shlq $47, %rax
-; X64-NEXT:    orq %rax, %rsp
+; X64-NEXT:    lock orl $0, -{{[0-9]+}}(%rsp)
 ; X64-NEXT:    retq
 ;
 ; X64-LFENCE-LABEL: idempotent_atomic:
 ; X64-LFENCE:       # %bb.0:
-; X64-LFENCE-NEXT:    lock orl $0, -64(%rsp)
+; X64-LFENCE-NEXT:    lock orl $0, -{{[0-9]+}}(%rsp)
 ; X64-LFENCE-NEXT:    retq
   %tmp = atomicrmw or i32* %x, i32 0 seq_cst
   ret void
