[llvm] 08b8b72 - [X86] Add inline assembly load hardening mitigation for Load Value Injection (LVI)

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Mon May 11 14:09:07 PDT 2020


Author: Craig Topper
Date: 2020-05-11T14:08:16-07:00
New Revision: 08b8b724ee3ac7ae7f516e036616620aa33968f1

URL: https://github.com/llvm/llvm-project/commit/08b8b724ee3ac7ae7f516e036616620aa33968f1
DIFF: https://github.com/llvm/llvm-project/commit/08b8b724ee3ac7ae7f516e036616620aa33968f1.diff

LOG: [X86] Add inline assembly load hardening mitigation for Load Value Injection (LVI)

Added code to X86AsmParser::emitInstruction() to add an LFENCE after each instruction that may load, and to emit a warning if it encounters an instruction that may be vulnerable to LVI but cannot be automatically mitigated.

Differential Revision: https://reviews.llvm.org/D76158
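
For illustration only (this example is not part of the commit; the file name
and function name are made up), the effect of the option on inline assembly
can be sketched as follows. The llc invocation mirrors the RUN line of the
new test:

    ; demo.ll -- minimal sketch, assuming x86-64 and the flags below
    ; llc -mtriple=x86_64-unknown -mattr=+lvi-load-hardening -mattr=+lvi-cfi \
    ;     -x86-experimental-lvi-inline-asm-hardening demo.ll -o -
    define void @demo() {
    entry:
      ; An inline-asm instruction that may load gets an lfence emitted
      ; immediately after it: "movq (%rdi), %rax" is followed by "lfence".
      call void asm sideeffect "movq (%rdi), %rax", "~{dirflag},~{fpsr},~{flags}"()
      ; A ret inside inline asm combines a load and a branch, so it cannot be
      ; hardened automatically; the parser only emits the "may be vulnerable
      ; to LVI" warning and leaves the instruction unchanged.
      call void asm sideeffect "ret", "~{dirflag},~{fpsr},~{flags}"()
      ret void
    }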

Added: 
    llvm/test/CodeGen/X86/lvi-hardening-inline-asm.ll

Modified: 
    llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp b/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
index ee76fe9fe32f..6b06656410eb 100644
--- a/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
+++ b/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
@@ -31,6 +31,7 @@
 #include "llvm/MC/MCStreamer.h"
 #include "llvm/MC/MCSubtargetInfo.h"
 #include "llvm/MC/MCSymbol.h"
+#include "llvm/Support/CommandLine.h"
 #include "llvm/Support/SourceMgr.h"
 #include "llvm/Support/TargetRegistry.h"
 #include "llvm/Support/raw_ostream.h"
@@ -39,6 +40,11 @@
 
 using namespace llvm;
 
+static cl::opt<bool> LVIInlineAsmHardening(
+    "x86-experimental-lvi-inline-asm-hardening",
+    cl::desc("Harden inline assembly code that may be vulnerable to Load Value"
+             " Injection (LVI). This feature is experimental."), cl::Hidden);
+
 static bool checkScale(unsigned Scale, StringRef &ErrMsg) {
   if (Scale != 1 && Scale != 2 && Scale != 4 && Scale != 8) {
     ErrMsg = "scale factor in address must be 1, 2, 4 or 8";
@@ -930,6 +936,11 @@ class X86AsmParser : public MCTargetAsmParser {
   bool validateInstruction(MCInst &Inst, const OperandVector &Ops);
   bool processInstruction(MCInst &Inst, const OperandVector &Ops);
 
+  // Load Value Injection (LVI) Mitigations for machine code
+  void emitWarningForSpecialLVIInstruction(SMLoc Loc);
+  bool applyLVICFIMitigation(MCInst &Inst);
+  bool applyLVILoadHardeningMitigation(MCInst &Inst, MCStreamer &Out);
+
   /// Wrapper around MCStreamer::emitInstruction(). Possibly adds
   /// instrumentation around Inst.
   void emitInstruction(MCInst &Inst, OperandVector &Operands, MCStreamer &Out);
@@ -3149,9 +3160,104 @@ bool X86AsmParser::validateInstruction(MCInst &Inst, const OperandVector &Ops) {
 
 static const char *getSubtargetFeatureName(uint64_t Val);
 
+void X86AsmParser::emitWarningForSpecialLVIInstruction(SMLoc Loc) {
+  Warning(Loc, "Instruction may be vulnerable to LVI and "
+               "requires manual mitigation");
+  Note(SMLoc(), "See https://software.intel.com/"
+                "security-software-guidance/insights/"
+                "deep-dive-load-value-injection#specialinstructions"
+                " for more information");
+}
+
+/// RET instructions, and instructions that make indirect calls or jumps
+/// through memory, combine a load and a branch within a single instruction.
+/// To mitigate these instructions against LVI, they must be decomposed into
+/// separate load and branch instructions, with an LFENCE in between. For more
+/// details, see:
+/// - X86LoadValueInjectionRetHardening.cpp
+/// - X86LoadValueInjectionIndirectThunks.cpp
+/// - https://software.intel.com/security-software-guidance/insights/deep-dive-load-value-injection
+///
+/// Returns `true` if a mitigation was applied or a warning was emitted.
+bool X86AsmParser::applyLVICFIMitigation(MCInst &Inst) {
+  // Information on control-flow instructions that require manual mitigation can
+  // be found here:
+  // https://software.intel.com/security-software-guidance/insights/deep-dive-load-value-injection#specialinstructions
+  switch (Inst.getOpcode()) {
+  case X86::RETW:
+  case X86::RETL:
+  case X86::RETQ:
+  case X86::RETIL:
+  case X86::RETIQ:
+  case X86::RETIW:
+  case X86::JMP16m:
+  case X86::JMP32m:
+  case X86::JMP64m:
+  case X86::CALL16m:
+  case X86::CALL32m:
+  case X86::CALL64m:
+    emitWarningForSpecialLVIInstruction(Inst.getLoc());
+    return true;
+  }
+  return false;
+}
+
+/// To mitigate LVI, every instruction that performs a load can be followed by
+/// an LFENCE instruction to squash any potential mis-speculation. There are
+/// some instructions that require additional considerations, and may require
+/// manual mitigation. For more details, see:
+/// https://software.intel.com/security-software-guidance/insights/deep-dive-load-value-injection
+///
+/// Returns `true` if a mitigation was applied or a warning was emitted.
+bool X86AsmParser::applyLVILoadHardeningMitigation(MCInst &Inst,
+                                                   MCStreamer &Out) {
+  auto Opcode = Inst.getOpcode();
+  auto Flags = Inst.getFlags();
+  if ((Flags & X86::IP_HAS_REPEAT) || (Flags & X86::IP_HAS_REPEAT_NE)) {
+    // Information on REP string instructions that require manual mitigation can
+    // be found here:
+    // https://software.intel.com/security-software-guidance/insights/deep-dive-load-value-injection#specialinstructions
+    switch (Opcode) {
+    case X86::CMPSB:
+    case X86::CMPSW:
+    case X86::CMPSL:
+    case X86::CMPSQ:
+    case X86::SCASB:
+    case X86::SCASW:
+    case X86::SCASL:
+    case X86::SCASQ:
+      emitWarningForSpecialLVIInstruction(Inst.getLoc());
+      return true;
+    }
+  } else if (Opcode == X86::REP_PREFIX || Opcode == X86::REPNE_PREFIX) {
+    // If a REP instruction is found on its own line, it may or may not be
+    // followed by a vulnerable instruction. Emit a warning just in case.
+    emitWarningForSpecialLVIInstruction(Inst.getLoc());
+    return true;
+  }
+
+  const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
+  // LFENCE has the mayLoad property, so don't double fence.
+  if (MCID.mayLoad() && Inst.getOpcode() != X86::LFENCE) {
+    MCInst FenceInst;
+    FenceInst.setOpcode(X86::LFENCE);
+    FenceInst.setLoc(Inst.getLoc());
+    Out.emitInstruction(FenceInst, getSTI());
+    return true;
+  }
+  return false;
+}
+
 void X86AsmParser::emitInstruction(MCInst &Inst, OperandVector &Operands,
                                    MCStreamer &Out) {
   Out.emitInstruction(Inst, getSTI());
+
+  if (LVIInlineAsmHardening) {
+    if (getSTI().getFeatureBits()[X86::FeatureLVIControlFlowIntegrity] &&
+        applyLVICFIMitigation(Inst))
+      return;
+    if (getSTI().getFeatureBits()[X86::FeatureLVILoadHardening])
+      applyLVILoadHardeningMitigation(Inst, Out);
+  }
 }
 
 bool X86AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,

diff  --git a/llvm/test/CodeGen/X86/lvi-hardening-inline-asm.ll b/llvm/test/CodeGen/X86/lvi-hardening-inline-asm.ll
new file mode 100644
index 000000000000..2b3ba2b30d4b
--- /dev/null
+++ b/llvm/test/CodeGen/X86/lvi-hardening-inline-asm.ll
@@ -0,0 +1,156 @@
+; RUN: llc -verify-machineinstrs -mtriple=x86_64-unknown -mattr=+lvi-load-hardening -mattr=+lvi-cfi -x86-experimental-lvi-inline-asm-hardening < %s -o %t.out 2> %t.err
+; RUN: FileCheck %s --check-prefix=X86 < %t.out
+; RUN: FileCheck %s --check-prefix=WARN < %t.err
+
+; Test module-level assembly
+module asm "pop %rbx"
+module asm "ret"
+; WARN:      warning: Instruction may be vulnerable to LVI
+; WARN-NEXT: ret
+; WARN-NEXT: ^
+; WARN-NEXT: note: See https://software.intel.com/security-software-guidance/insights/deep-dive-load-value-injection#specialinstructions for more information
+
+; Function Attrs: noinline nounwind optnone uwtable
+define dso_local void @test_inline_asm() {
+entry:
+; X86-LABEL: test_inline_asm:
+  call void asm sideeffect "mov 0x3fed(%rip),%rax", "~{dirflag},~{fpsr},~{flags}"() #1
+; X86:      movq  16365(%rip), %rax
+; X86-NEXT: lfence
+  call void asm sideeffect "movdqa 0x0(%rip),%xmm0", "~{dirflag},~{fpsr},~{flags}"() #1
+; X86:      movdqa  (%rip), %xmm0
+; X86-NEXT: lfence
+  call void asm sideeffect "movslq 0x3e5d(%rip),%rbx", "~{dirflag},~{fpsr},~{flags}"() #1
+; X86:      movslq  15965(%rip), %rbx
+; X86-NEXT: lfence
+  call void asm sideeffect "mov (%r12,%rax,8),%rax", "~{dirflag},~{fpsr},~{flags}"() #1
+; X86:      movq  (%r12,%rax,8), %rax
+; X86-NEXT: lfence
+  call void asm sideeffect "movq (24)(%rsi), %r11", "~{dirflag},~{fpsr},~{flags}"() #1
+; X86:      movq  24(%rsi), %r11
+; X86-NEXT: lfence
+  call void asm sideeffect "cmove %r12,%rax", "~{dirflag},~{fpsr},~{flags}"() #1
+; X86:      cmoveq  %r12, %rax
+; X86-NOT:  lfence
+  call void asm sideeffect "cmove (%r12),%rax", "~{dirflag},~{fpsr},~{flags}"() #1
+; X86:      cmoveq  (%r12), %rax
+; X86-NEXT: lfence
+  call void asm sideeffect "pop %rbx", "~{dirflag},~{fpsr},~{flags}"() #1
+; X86:      popq  %rbx
+; X86-NEXT: lfence
+  call void asm sideeffect "popq %rbx", "~{dirflag},~{fpsr},~{flags}"() #1
+; X86:      popq  %rbx
+; X86-NEXT: lfence
+  call void asm sideeffect "xchg (%r12),%rax", "~{dirflag},~{fpsr},~{flags}"() #1
+; X86:      xchgq %rax, (%r12)
+; X86-NEXT: lfence
+  call void asm sideeffect "cmpxchg %r12,(%rax)", "~{dirflag},~{fpsr},~{flags}"() #1
+; X86:      cmpxchgq  %r12, (%rax)
+; X86-NEXT: lfence
+  call void asm sideeffect "vpxor (%rcx,%rdx,1),%ymm1,%ymm0", "~{dirflag},~{fpsr},~{flags}"() #1
+; X86:      vpxor (%rcx,%rdx), %ymm1, %ymm0
+; X86-NEXT: lfence
+  call void asm sideeffect "vpmuludq 0x20(%rsi),%ymm0,%ymm12", "~{dirflag},~{fpsr},~{flags}"() #1
+; X86:      vpmuludq  32(%rsi), %ymm0, %ymm12
+; X86-NEXT: lfence
+  call void asm sideeffect "vpexpandq 0x40(%rdi),%zmm8{%k2}{z}", "~{dirflag},~{fpsr},~{flags}"() #1
+; X86:      vpexpandq 64(%rdi), %zmm8 {%k2} {z}
+; X86-NEXT: lfence
+  call void asm sideeffect "addq (%r12),%rax", "~{dirflag},~{fpsr},~{flags}"() #1
+; X86:      addq  (%r12), %rax
+; X86-NEXT: lfence
+  call void asm sideeffect "subq Lpoly+0(%rip), %rax", "~{dirflag},~{fpsr},~{flags}"() #1
+; X86:      subq  Lpoly+0(%rip), %rax
+; X86-NEXT: lfence
+  call void asm sideeffect "adcq %r12,(%rax)", "~{dirflag},~{fpsr},~{flags}"() #1
+; X86:      adcq  %r12, (%rax)
+; X86-NEXT: lfence
+  call void asm sideeffect "negq (%rax)", "~{dirflag},~{fpsr},~{flags}"() #1
+; X86:      negq  (%rax)
+; X86-NEXT: lfence
+  call void asm sideeffect "incq %rax", "~{dirflag},~{fpsr},~{flags}"() #1
+; X86:      incq  %rax
+; X86-NOT:  lfence
+  call void asm sideeffect "mulq (%rax)", "~{dirflag},~{fpsr},~{flags}"() #1
+; X86:      mulq  (%rax)
+; X86-NEXT: lfence
+  call void asm sideeffect "imulq (%rax),%rdx", "~{dirflag},~{fpsr},~{flags}"() #1
+; X86:      imulq (%rax), %rdx
+; X86-NEXT: lfence
+  call void asm sideeffect "shlq $$1,(%rax)", "~{dirflag},~{fpsr},~{flags}"() #1
+; X86:      shlq  (%rax)
+; X86-NEXT: lfence
+  call void asm sideeffect "shrq $$1,(%rax)", "~{dirflag},~{fpsr},~{flags}"() #1
+; X86:      shrq  (%rax)
+; X86-NEXT: lfence
+  call void asm sideeffect "repz cmpsb %es:(%rdi),%ds:(%rsi)", "~{dirflag},~{fpsr},~{flags}"() #1
+; WARN:      warning: Instruction may be vulnerable to LVI
+; WARN-NEXT: repz cmpsb %es:(%rdi),%ds:(%rsi)
+; WARN-NEXT: ^
+; WARN-NEXT: note: See https://software.intel.com/security-software-guidance/insights/deep-dive-load-value-injection#specialinstructions for more information
+; X86:      rep cmpsb %es:(%rdi), %ds:(%rsi)
+; X86-NOT:  lfence
+  call void asm sideeffect "repnz scasb", "~{dirflag},~{fpsr},~{flags}"() #1
+; WARN:      warning: Instruction may be vulnerable to LVI
+; WARN-NEXT: repnz scasb
+; WARN-NEXT: ^
+; WARN-NEXT: note: See https://software.intel.com/security-software-guidance/insights/deep-dive-load-value-injection#specialinstructions for more information
+; X86:      repne scasb %es:(%rdi), %al
+; X86-NOT:  lfence
+  call void asm sideeffect "repnz", ""() #1
+; WARN:      warning: Instruction may be vulnerable to LVI
+; WARN-NEXT: repnz
+; WARN-NEXT: ^
+; WARN-NEXT: note: See https://software.intel.com/security-software-guidance/insights/deep-dive-load-value-injection#specialinstructions for more information
+  call void asm sideeffect "pinsrw $$0x6,(%eax),%xmm0", "~{dirflag},~{fpsr},~{flags}"() #1
+; X86:      pinsrw  $6, (%eax), %xmm0
+; X86-NEXT: lfence
+  call void asm sideeffect "ret", "~{dirflag},~{fpsr},~{flags}"() #1
+; WARN:      warning: Instruction may be vulnerable to LVI
+; WARN-NEXT: ret
+; WARN-NEXT: ^
+; WARN-NEXT: note: See https://software.intel.com/security-software-guidance/insights/deep-dive-load-value-injection#specialinstructions for more information
+; X86:      retq
+; X86-NOT:  lfence
+  call void asm sideeffect "ret $$8", "~{dirflag},~{fpsr},~{flags}"() #1
+; WARN:      warning: Instruction may be vulnerable to LVI
+; WARN-NEXT: ret $8
+; WARN-NEXT: ^
+; WARN-NEXT: note: See https://software.intel.com/security-software-guidance/insights/deep-dive-load-value-injection#specialinstructions for more information
+; X86:      retq  $8
+; X86-NOT:  lfence
+  call void asm sideeffect "jmpq *(%rdx)", "~{dirflag},~{fpsr},~{flags}"() #1
+; WARN:      warning: Instruction may be vulnerable to LVI
+; WARN-NEXT: jmpq *(%rdx)
+; WARN-NEXT: ^
+; WARN-NEXT: note: See https://software.intel.com/security-software-guidance/insights/deep-dive-load-value-injection#specialinstructions for more information
+; X86:      jmpq *(%rdx)
+; X86-NOT:  lfence
+  call void asm sideeffect "jmpq *0x100(%rdx)", "~{dirflag},~{fpsr},~{flags}"() #1
+; WARN:      warning: Instruction may be vulnerable to LVI
+; WARN-NEXT: jmpq *0x100(%rdx)
+; WARN-NEXT: ^
+; WARN-NEXT: note: See https://software.intel.com/security-software-guidance/insights/deep-dive-load-value-injection#specialinstructions for more information
+; X86:      jmpq *256(%rdx)
+; X86-NOT:  lfence
+  call void asm sideeffect "callq *200(%rdx)", "~{dirflag},~{fpsr},~{flags}"() #1
+; WARN:      warning: Instruction may be vulnerable to LVI
+; WARN-NEXT: callq *200(%rdx)
+; WARN-NEXT: ^
+; WARN-NEXT: note: See https://software.intel.com/security-software-guidance/insights/deep-dive-load-value-injection#specialinstructions for more information
+; X86:      callq *200(%rdx)
+; X86-NOT:  lfence
+  call void asm sideeffect "fldt 0x8(%rbp)", "~{dirflag},~{fpsr},~{flags}"() #1
+; X86:      fldt  8(%rbp)
+; X86-NEXT: lfence
+  call void asm sideeffect "fld %st(0)", "~{dirflag},~{fpsr},~{flags}"() #1
+; X86:      fld %st(0)
+; X86-NOT:  lfence
+; Test assembler macros
+  call void asm sideeffect ".macro mplus1 x\0Aincq (\5Cx)\0A.endm\0Amplus1 %rcx", "~{dirflag},~{fpsr},~{flags}"() #1
+; X86:      incq (%rcx)
+; X86-NEXT: lfence
+  ret void
+}
+
+attributes #1 = { nounwind }


More information about the llvm-commits mailing list