[llvm] f95a67d - [X86] Add RET-hardening Support to mitigate Load Value Injection (LVI)

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Fri Apr 3 12:09:04 PDT 2020


Author: Scott Constable
Date: 2020-04-03T12:08:34-07:00
New Revision: f95a67d8b8a8cec326ee4a7e8427b779936d100a

URL: https://github.com/llvm/llvm-project/commit/f95a67d8b8a8cec326ee4a7e8427b779936d100a
DIFF: https://github.com/llvm/llvm-project/commit/f95a67d8b8a8cec326ee4a7e8427b779936d100a.diff

LOG: [X86] Add RET-hardening Support to mitigate Load Value Injection (LVI)

This patch adds a pass that replaces every ret instruction with the sequence:

pop <scratch-reg>
lfence
jmp *<scratch-reg>

where <scratch-reg> is an available scratch register, chosen according to the
calling convention of the function being mitigated.
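
As an illustration (a hypothetical epilogue, not taken verbatim from the patch),
a function that currently ends with

    popq    %rbp
    retq

would instead be emitted roughly as

    popq    %rbp
    popq    %rcx            # return address popped into an available scratch register
    lfence                  # barrier: the popped value must resolve before the jump
    jmpq    *%rcx           # return via a register-indirect jump

For the standard C calling convention the first eligible scratch register is
typically %rcx, since RSP, RIP, the return registers RAX/RDX, and the
callee-saved registers are excluded. If no scratch register is available
(e.g. for functions marked no_caller_saved_registers), the pass keeps the ret
and instead emits

    shlq    $0, (%rsp)      # harmless read-modify-write of the return-address slot
    lfence
    retq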

Differential Revision: https://reviews.llvm.org/D75935
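
As a rough usage sketch (the -mattr form and the input.ll filename are
assumptions; the new test instead requests the mitigation per function via the
"+lvi-cfi" entry in its "target-features" attribute), the hardening can be
exercised directly with llc:

    llc -verify-machineinstrs -mtriple=x86_64-unknown -mattr=+lvi-cfi -o - input.ll

The pass is added unconditionally in addPreEmitPass2 and returns early unless
the subtarget reports useLVIControlFlowIntegrity() and the target is 64-bit
(32-bit support is left as a FIXME).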

Added: 
    llvm/lib/Target/X86/X86LoadValueInjectionRetHardening.cpp
    llvm/test/CodeGen/X86/lvi-hardening-ret.ll

Modified: 
    llvm/lib/Target/X86/CMakeLists.txt
    llvm/lib/Target/X86/X86.h
    llvm/lib/Target/X86/X86TargetMachine.cpp
    llvm/test/CodeGen/X86/O0-pipeline.ll
    llvm/test/CodeGen/X86/O3-pipeline.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/X86/CMakeLists.txt b/llvm/lib/Target/X86/CMakeLists.txt
index a07b66426fbb..5df9978bf11d 100644
--- a/llvm/lib/Target/X86/CMakeLists.txt
+++ b/llvm/lib/Target/X86/CMakeLists.txt
@@ -52,6 +52,7 @@ set(sources
   X86InstrInfo.cpp
   X86EvexToVex.cpp
   X86LegalizerInfo.cpp
+  X86LoadValueInjectionRetHardening.cpp
   X86MCInstLower.cpp
   X86MachineFunctionInfo.cpp
   X86MacroFusion.cpp

diff --git a/llvm/lib/Target/X86/X86.h b/llvm/lib/Target/X86/X86.h
index f251cde5d253..1b6e99ae7b9b 100644
--- a/llvm/lib/Target/X86/X86.h
+++ b/llvm/lib/Target/X86/X86.h
@@ -142,6 +142,7 @@ InstructionSelector *createX86InstructionSelector(const X86TargetMachine &TM,
                                                   X86Subtarget &,
                                                   X86RegisterBankInfo &);
 
+FunctionPass *createX86LoadValueInjectionRetHardeningPass();
 FunctionPass *createX86SpeculativeLoadHardeningPass();
 
 void initializeEvexToVexInstPassPass(PassRegistry &);
@@ -158,6 +159,7 @@ void initializeX86DomainReassignmentPass(PassRegistry &);
 void initializeX86ExecutionDomainFixPass(PassRegistry &);
 void initializeX86ExpandPseudoPass(PassRegistry &);
 void initializeX86FlagsCopyLoweringPassPass(PassRegistry &);
+void initializeX86LoadValueInjectionRetHardeningPassPass(PassRegistry &);
 void initializeX86OptimizeLEAPassPass(PassRegistry &);
 void initializeX86PartialReductionPass(PassRegistry &);
 void initializeX86SpeculativeLoadHardeningPassPass(PassRegistry &);

diff --git a/llvm/lib/Target/X86/X86LoadValueInjectionRetHardening.cpp b/llvm/lib/Target/X86/X86LoadValueInjectionRetHardening.cpp
new file mode 100644
index 000000000000..9c36e86099f9
--- /dev/null
+++ b/llvm/lib/Target/X86/X86LoadValueInjectionRetHardening.cpp
@@ -0,0 +1,140 @@
+//===-- X86LoadValueInjectionRetHardening.cpp - LVI RET hardening for x86 --==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Description: Replaces every `ret` instruction with the sequence:
+/// ```
+/// pop <scratch-reg>
+/// lfence
+/// jmp *<scratch-reg>
+/// ```
+/// where `<scratch-reg>` is some available scratch register, according to the
+/// calling convention of the function being mitigated.
+///
+//===----------------------------------------------------------------------===//
+
+#include "X86.h"
+#include "X86InstrBuilder.h"
+#include "X86Subtarget.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/IR/Function.h"
+#include "llvm/Support/Debug.h"
+#include <bitset>
+
+using namespace llvm;
+
+#define PASS_KEY "x86-lvi-ret"
+#define DEBUG_TYPE PASS_KEY
+
+STATISTIC(NumFences, "Number of LFENCEs inserted for LVI mitigation");
+STATISTIC(NumFunctionsConsidered, "Number of functions analyzed");
+STATISTIC(NumFunctionsMitigated, "Number of functions for which mitigations "
+                                 "were deployed");
+
+namespace {
+
+class X86LoadValueInjectionRetHardeningPass : public MachineFunctionPass {
+public:
+  X86LoadValueInjectionRetHardeningPass() : MachineFunctionPass(ID) {}
+  StringRef getPassName() const override {
+    return "X86 Load Value Injection (LVI) Ret-Hardening";
+  }
+  bool runOnMachineFunction(MachineFunction &MF) override;
+
+  static char ID;
+};
+
+} // end anonymous namespace
+
+char X86LoadValueInjectionRetHardeningPass::ID = 0;
+
+bool X86LoadValueInjectionRetHardeningPass::runOnMachineFunction(
+    MachineFunction &MF) {
+  LLVM_DEBUG(dbgs() << "***** " << getPassName() << " : " << MF.getName()
+                    << " *****\n");
+  const X86Subtarget *Subtarget = &MF.getSubtarget<X86Subtarget>();
+  if (!Subtarget->useLVIControlFlowIntegrity() || !Subtarget->is64Bit())
+    return false; // FIXME: support 32-bit
+
+  // Don't skip functions with the "optnone" attr but participate in opt-bisect.
+  const Function &F = MF.getFunction();
+  if (!F.hasOptNone() && skipFunction(F))
+    return false;
+
+  ++NumFunctionsConsidered;
+  const X86RegisterInfo *TRI = Subtarget->getRegisterInfo();
+  const X86InstrInfo *TII = Subtarget->getInstrInfo();
+  unsigned ClobberReg = X86::NoRegister;
+  std::bitset<X86::NUM_TARGET_REGS> UnclobberableGR64s;
+  UnclobberableGR64s.set(X86::RSP); // can't clobber stack pointer
+  UnclobberableGR64s.set(X86::RIP); // can't clobber instruction pointer
+  UnclobberableGR64s.set(X86::RAX); // used for function return
+  UnclobberableGR64s.set(X86::RDX); // used for function return
+
+  // We can clobber any register allowed by the function's calling convention.
+  for (const MCPhysReg *PR = TRI->getCalleeSavedRegs(&MF); auto Reg = *PR; ++PR)
+    UnclobberableGR64s.set(Reg);
+  for (auto &Reg : X86::GR64RegClass) {
+    if (!UnclobberableGR64s.test(Reg)) {
+      ClobberReg = Reg;
+      break;
+    }
+  }
+
+  if (ClobberReg != X86::NoRegister) {
+    LLVM_DEBUG(dbgs() << "Selected register "
+                      << Subtarget->getRegisterInfo()->getRegAsmName(ClobberReg)
+                      << " to clobber\n");
+  } else {
+    LLVM_DEBUG(dbgs() << "Could not find a register to clobber\n");
+  }
+
+  bool Modified = false;
+  for (auto &MBB : MF) {
+    MachineInstr &MI = MBB.back();
+    if (MI.getOpcode() != X86::RETQ)
+      continue;
+
+    if (ClobberReg != X86::NoRegister) {
+      MBB.erase_instr(&MI);
+      BuildMI(MBB, MBB.end(), DebugLoc(), TII->get(X86::POP64r))
+          .addReg(ClobberReg, RegState::Define)
+          .setMIFlag(MachineInstr::FrameDestroy);
+      BuildMI(MBB, MBB.end(), DebugLoc(), TII->get(X86::LFENCE));
+      BuildMI(MBB, MBB.end(), DebugLoc(), TII->get(X86::JMP64r))
+          .addReg(ClobberReg);
+    } else {
+      // In case there is no available scratch register, we can still read from
+      // RSP to assert that RSP points to a valid page. The write to RSP is
+      // also helpful because it verifies that the stack's write permissions
+      // are intact.
+      MachineInstr *Fence = BuildMI(MBB, MI, DebugLoc(), TII->get(X86::LFENCE));
+      addRegOffset(BuildMI(MBB, Fence, DebugLoc(), TII->get(X86::SHL64mi)),
+                   X86::RSP, false, 0)
+          .addImm(0)
+          ->addRegisterDead(X86::EFLAGS, TRI);
+    }
+
+    ++NumFences;
+    Modified = true;
+  }
+
+  if (Modified)
+    ++NumFunctionsMitigated;
+  return Modified;
+}
+
+INITIALIZE_PASS(X86LoadValueInjectionRetHardeningPass, PASS_KEY,
+                "X86 LVI ret hardener", false, false)
+
+FunctionPass *llvm::createX86LoadValueInjectionRetHardeningPass() {
+  return new X86LoadValueInjectionRetHardeningPass();
+}

diff --git a/llvm/lib/Target/X86/X86TargetMachine.cpp b/llvm/lib/Target/X86/X86TargetMachine.cpp
index 16c3edf8769f..b2551b64eb0d 100644
--- a/llvm/lib/Target/X86/X86TargetMachine.cpp
+++ b/llvm/lib/Target/X86/X86TargetMachine.cpp
@@ -83,6 +83,7 @@ extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeX86Target() {
   initializeX86SpeculativeLoadHardeningPassPass(PR);
   initializeX86FlagsCopyLoweringPassPass(PR);
   initializeX86CondBrFoldingPassPass(PR);
+  initializeX86LoadValueInjectionRetHardeningPassPass(PR);
   initializeX86OptimizeLEAPassPass(PR);
   initializeX86PartialReductionPass(PR);
 }
@@ -540,6 +541,7 @@ void X86PassConfig::addPreEmitPass2() {
   // Identify valid longjmp targets for Windows Control Flow Guard.
   if (TT.isOSWindows())
     addPass(createCFGuardLongjmpPass());
+  addPass(createX86LoadValueInjectionRetHardeningPass());
 }
 
 std::unique_ptr<CSEConfigBase> X86PassConfig::getCSEConfig() const {

diff --git a/llvm/test/CodeGen/X86/O0-pipeline.ll b/llvm/test/CodeGen/X86/O0-pipeline.ll
index 5ca3129c784d..9af81b77a70b 100644
--- a/llvm/test/CodeGen/X86/O0-pipeline.ll
+++ b/llvm/test/CodeGen/X86/O0-pipeline.ll
@@ -74,6 +74,7 @@
 ; CHECK-NEXT:       Live DEBUG_VALUE analysis
 ; CHECK-NEXT:       X86 Indirect Thunks
 ; CHECK-NEXT:       Check CFA info and insert CFI instructions if needed
+; CHECK-NEXT:       X86 Load Value Injection (LVI) Ret-Hardening
 ; CHECK-NEXT:       Lazy Machine Block Frequency Analysis
 ; CHECK-NEXT:       Machine Optimization Remark Emitter
 ; CHECK-NEXT:       X86 Assembly Printer

diff --git a/llvm/test/CodeGen/X86/O3-pipeline.ll b/llvm/test/CodeGen/X86/O3-pipeline.ll
index f7518afc5cd5..97ce5082d4f7 100644
--- a/llvm/test/CodeGen/X86/O3-pipeline.ll
+++ b/llvm/test/CodeGen/X86/O3-pipeline.ll
@@ -184,6 +184,7 @@
 ; CHECK-NEXT:       Live DEBUG_VALUE analysis
 ; CHECK-NEXT:       X86 Indirect Thunks
 ; CHECK-NEXT:       Check CFA info and insert CFI instructions if needed
+; CHECK-NEXT:       X86 Load Value Injection (LVI) Ret-Hardening
 ; CHECK-NEXT:       Lazy Machine Block Frequency Analysis
 ; CHECK-NEXT:       Machine Optimization Remark Emitter
 ; CHECK-NEXT:       X86 Assembly Printer

diff --git a/llvm/test/CodeGen/X86/lvi-hardening-ret.ll b/llvm/test/CodeGen/X86/lvi-hardening-ret.ll
new file mode 100644
index 000000000000..9f2b028b3034
--- /dev/null
+++ b/llvm/test/CodeGen/X86/lvi-hardening-ret.ll
@@ -0,0 +1,72 @@
+; RUN: llc -verify-machineinstrs -mtriple=x86_64-unknown < %s | FileCheck %s
+
+define dso_local void @one_instruction() #0 {
+; CHECK-LABEL: one_instruction:
+entry:
+  ret void
+; CHECK-NOT:   retq
+; CHECK:       popq %[[x:[^ ]*]]
+; CHECK-NEXT:  lfence
+; CHECK-NEXT:  jmpq *%[[x]]
+}
+
+; Function Attrs: noinline nounwind optnone uwtable
+define dso_local i32 @ordinary_function(i32 %x, i32 %y) #0 {
+; CHECK-LABEL: ordinary_function:
+entry:
+  %x.addr = alloca i32, align 4
+  %y.addr = alloca i32, align 4
+  store i32 %x, i32* %x.addr, align 4
+  store i32 %y, i32* %y.addr, align 4
+  %0 = load i32, i32* %x.addr, align 4
+  %1 = load i32, i32* %y.addr, align 4
+  %add = add nsw i32 %0, %1
+  ret i32 %add
+; CHECK-NOT:  retq
+; CHECK:      popq %[[x:[^ ]*]]
+; CHECK-NEXT: lfence
+; CHECK-NEXT: jmpq *%[[x]]
+}
+
+; Function Attrs: noinline nounwind optnone uwtable
+define dso_local i32 @no_caller_saved_registers_function(i32 %x, i32 %y) #1 {
+; CHECK-LABEL: no_caller_saved_registers_function:
+entry:
+  %x.addr = alloca i32, align 4
+  %y.addr = alloca i32, align 4
+  store i32 %x, i32* %x.addr, align 4
+  store i32 %y, i32* %y.addr, align 4
+  %0 = load i32, i32* %x.addr, align 4
+  %1 = load i32, i32* %y.addr, align 4
+  %add = add nsw i32 %0, %1
+  ret i32 %add
+; CHECK-NOT:  retq
+; CHECK:      shlq $0, (%{{[^ ]*}})
+; CHECK-NEXT: lfence
+; CHECK-NEXT: retq
+}
+
+; Function Attrs: noinline nounwind optnone uwtable
+define dso_local preserve_mostcc void @preserve_most() #0 {
+; CHECK-LABEL: preserve_most:
+entry:
+  ret void
+; CHECK-NOT:  retq
+; CHECK:      popq %r11
+; CHECK-NEXT: lfence
+; CHECK-NEXT: jmpq *%r11
+}
+
+; Function Attrs: noinline nounwind optnone uwtable
+define dso_local preserve_allcc void @preserve_all() #0 {
+; CHECK-LABEL: preserve_all:
+entry:
+  ret void
+; CHECK-NOT:  retq
+; CHECK:      popq %r11
+; CHECK-NEXT: lfence
+; CHECK-NEXT: jmpq *%r11
+}
+
+attributes #0 = { "target-features"="+lvi-cfi" }
+attributes #1 = { "no_caller_saved_registers" "target-features"="+lvi-cfi" }
