[llvm] 416e8c6 - Enhance stack protector for calling no return function
Xiang1 Zhang via llvm-commits
llvm-commits at lists.llvm.org
Wed Nov 30 21:25:30 PST 2022
Author: Xiang1 Zhang
Date: 2022-12-01T13:20:36+08:00
New Revision: 416e8c6ad529c57f21f46c6f52ded96d3ed239fb
URL: https://github.com/llvm/llvm-project/commit/416e8c6ad529c57f21f46c6f52ded96d3ed239fb
DIFF: https://github.com/llvm/llvm-project/commit/416e8c6ad529c57f21f46c6f52ded96d3ed239fb.diff
LOG: Enhance stack protector for calling no return function
Reviewed By: LuoYuanke, WangPengfei, lebedev.ri
Differential Revision: https://reviews.llvm.org/D138774
Added:
llvm/test/CodeGen/X86/stack-protector-no-return.ll
Modified:
llvm/lib/CodeGen/StackProtector.cpp
llvm/test/CodeGen/X86/stack-protector-2.ll
Removed:
################################################################################
diff --git a/llvm/lib/CodeGen/StackProtector.cpp b/llvm/lib/CodeGen/StackProtector.cpp
index ff828f560f11..2eef88bc1c8d 100644
--- a/llvm/lib/CodeGen/StackProtector.cpp
+++ b/llvm/lib/CodeGen/StackProtector.cpp
@@ -414,11 +414,11 @@ static Value *getStackGuard(const TargetLoweringBase *TLI, Module *M,
///
/// Returns true if the platform/triple supports the stackprotectorcreate pseudo
/// node.
-static bool CreatePrologue(Function *F, Module *M, ReturnInst *RI,
+static bool CreatePrologue(Function *F, Module *M, Instruction *CheckLoc,
const TargetLoweringBase *TLI, AllocaInst *&AI) {
bool SupportsSelectionDAGSP = false;
IRBuilder<> B(&F->getEntryBlock().front());
- PointerType *PtrTy = Type::getInt8PtrTy(RI->getContext());
+ PointerType *PtrTy = Type::getInt8PtrTy(CheckLoc->getContext());
AI = B.CreateAlloca(PtrTy, nullptr, "StackGuardSlot");
Value *GuardSlot = getStackGuard(TLI, M, B, &SupportsSelectionDAGSP);
@@ -443,14 +443,27 @@ bool StackProtector::InsertStackProtectors() {
AllocaInst *AI = nullptr; // Place on stack that stores the stack guard.
for (BasicBlock &BB : llvm::make_early_inc_range(*F)) {
- ReturnInst *RI = dyn_cast<ReturnInst>(BB.getTerminator());
- if (!RI)
+ Instruction *CheckLoc = dyn_cast<ReturnInst>(BB.getTerminator());
+ if (!CheckLoc) {
+ for (auto &Inst : BB) {
+ auto *CB = dyn_cast<CallBase>(&Inst);
+ if (!CB)
+ continue;
+ if (!CB->doesNotReturn())
+ continue;
+ // Do stack check before non-return calls (e.g: __cxa_throw)
+ CheckLoc = CB;
+ break;
+ }
+ }
+
+ if (!CheckLoc)
continue;
// Generate prologue instrumentation if not already generated.
if (!HasPrologue) {
HasPrologue = true;
- SupportsSelectionDAGSP &= CreatePrologue(F, M, RI, TLI, AI);
+ SupportsSelectionDAGSP &= CreatePrologue(F, M, CheckLoc, TLI, AI);
}
// SelectionDAG based code generation. Nothing else needs to be done here.
@@ -476,8 +489,7 @@ bool StackProtector::InsertStackProtectors() {
// verifier guarantees that a tail call is either directly before the
// return or with a single correct bitcast of the return value in between so
// we don't need to worry about many situations here.
- Instruction *CheckLoc = RI;
- Instruction *Prev = RI->getPrevNonDebugInstruction();
+ Instruction *Prev = CheckLoc->getPrevNonDebugInstruction();
if (Prev && isa<CallInst>(Prev) && cast<CallInst>(Prev)->isTailCall())
CheckLoc = Prev;
else if (Prev) {
diff --git a/llvm/test/CodeGen/X86/stack-protector-2.ll b/llvm/test/CodeGen/X86/stack-protector-2.ll
index c6971a59f813..f2fc64ab0c86 100644
--- a/llvm/test/CodeGen/X86/stack-protector-2.ll
+++ b/llvm/test/CodeGen/X86/stack-protector-2.ll
@@ -192,4 +192,34 @@ define dso_local void @bar_nossp(i64 %0) {
ret void
}
+; Check stack protect for noreturn call
+define dso_local i32 @foo_no_return(i32 %0) #1 {
+; CHECK-LABEL: @foo_no_return
+entry:
+ %cmp = icmp sgt i32 %0, 4
+ br i1 %cmp, label %if.then, label %if.end
+
+; CHECK: if.then: ; preds = %entry
+; CHECK-NEXT: %StackGuard1 = load volatile i8*, i8* addrspace(257)* inttoptr (i32 40 to i8* addrspace(257)*), align 8
+; CHECK-NEXT: %1 = load volatile i8*, i8** %StackGuardSlot, align 8
+; CHECK-NEXT: %2 = icmp eq i8* %StackGuard1, %1
+; CHECK-NEXT: br i1 %2, label %SP_return, label %CallStackCheckFailBlk
+; CHECK: SP_return: ; preds = %if.then
+; CHECK-NEXT: %call = call i32 @foo_no_return(i32 1)
+; CHECK-NEXT: br label %return
+; CHECK: if.end: ; preds = %entry
+; CHECK-NEXT: br label %return
+
+if.then: ; preds = %entry
+ %call = call i32 @foo_no_return(i32 1)
+ br label %return
+
+if.end: ; preds = %entry
+ br label %return
+
+return: ; preds = %if.end, %if.then
+ ret i32 0
+}
+
attributes #0 = { sspstrong }
+attributes #1 = { noreturn sspreq}
diff --git a/llvm/test/CodeGen/X86/stack-protector-no-return.ll b/llvm/test/CodeGen/X86/stack-protector-no-return.ll
new file mode 100644
index 000000000000..ce78b2b7fe75
--- /dev/null
+++ b/llvm/test/CodeGen/X86/stack-protector-no-return.ll
@@ -0,0 +1,194 @@
+; RUN: llc %s -mtriple=x86_64-unknown-linux-gnu -o - | FileCheck %s
+
+$__clang_call_terminate = comdat any
+
+@_ZTIi = external dso_local constant i8*
+ at .str = private unnamed_addr constant [5 x i8] c"win\0A\00", align 1
+
+; Function Attrs: mustprogress noreturn sspreq uwtable
+define dso_local void @_Z7catchesv() local_unnamed_addr #0 personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+; CHECK-LABEL: _Z7catchesv:
+; CHECK: # %bb.0: # %entry
+; CHECK: movq %fs:40, %rax
+; CHECK-NEXT: movq %rax, 8(%rsp)
+entry:
+ %exception = tail call i8* @__cxa_allocate_exception(i64 4) #8
+ %0 = bitcast i8* %exception to i32*
+ store i32 1, i32* %0, align 16
+ invoke void @__cxa_throw(i8* nonnull %exception, i8* bitcast (i8** @_ZTIi to i8*), i8* null) #9
+ to label %unreachable unwind label %lpad
+
+; CHECK: callq __cxa_allocate_exception
+; CHECK-NEXT: movl $1, (%rax)
+; CHECK-NEXT: movq %fs:40, %rcx
+; CHECK-NEXT: cmpq 8(%rsp), %rcx
+; CHECK-NEXT: jne .LBB0_12
+; CHECK-NEXT: # %bb.1: # %SP_return
+; CHECK-NEXT: .Ltmp0:
+; CHECK-NEXT: movl $_ZTIi, %esi
+; CHECK-NEXT: movq %rax, %rdi
+; CHECK-NEXT: xorl %edx, %edx
+; CHECK-NEXT: callq __cxa_throw
+
+lpad: ; preds = %entry
+ %1 = landingpad { i8*, i32 }
+ catch i8* null
+ %2 = extractvalue { i8*, i32 } %1, 0
+ %3 = tail call i8* @__cxa_begin_catch(i8* %2) #8
+ %call = invoke i64 @write(i32 noundef 1, i8* noundef getelementptr inbounds ([5 x i8], [5 x i8]* @.str, i64 0, i64 0), i64 noundef 4)
+ to label %invoke.cont unwind label %lpad1
+; CHECK: callq __cxa_begin_catch
+; CHECK-NEXT: .Ltmp3:
+; CHECK-NEXT: movl $.L.str, %esi
+; CHECK-NEXT: movl $4, %edx
+; CHECK-NEXT: movl $1, %edi
+; CHECK-NEXT: callq write
+
+
+invoke.cont: ; preds = %lpad
+ invoke void @_exit(i32 noundef 1) #9
+ to label %invoke.cont2 unwind label %lpad1
+; CHECK: # %bb.3: # %invoke.cont
+; CHECK-NEXT: movq %fs:40, %rax
+; CHECK-NEXT: cmpq 8(%rsp), %rax
+; CHECK-NEXT: jne .LBB0_12
+; CHECK-NEXT: # %bb.4: # %SP_return3
+; CHECK-NEXT: .Ltmp5:
+; CHECK-NEXT: movl $1, %edi
+; CHECK-NEXT: callq _exit
+
+invoke.cont2: ; preds = %invoke.cont
+ unreachable
+
+lpad1: ; preds = %invoke.cont, %lpad
+ %4 = landingpad { i8*, i32 }
+ cleanup
+ invoke void @__cxa_end_catch()
+ to label %eh.resume unwind label %terminate.lpad
+; CHECK: .LBB0_6: # %lpad1
+; CHECK-NEXT: .Ltmp7:
+; CHECK-NEXT: movq %rax, %rbx
+; CHECK-NEXT: .Ltmp8:
+; CHECK-NEXT: callq __cxa_end_catch
+
+eh.resume: ; preds = %lpad1
+ resume { i8*, i32 } %4
+; CHECK: # %bb.7: # %eh.resume
+; CHECK-NEXT: movq %fs:40, %rax
+; CHECK-NEXT: cmpq 8(%rsp), %rax
+; CHECK-NEXT: jne .LBB0_12
+; CHECK-NEXT: # %bb.8: # %SP_return6
+; CHECK-NEXT: movq %rbx, %rdi
+; CHECK-NEXT: callq _Unwind_Resume@PLT
+
+terminate.lpad: ; preds = %lpad1
+ %5 = landingpad { i8*, i32 }
+ catch i8* null
+ %6 = extractvalue { i8*, i32 } %5, 0
+ tail call void @__clang_call_terminate(i8* %6) #10
+ unreachable
+
+; CHECK: .LBB0_9: # %terminate.lpad
+; CHECK: movq %fs:40, %rcx
+; CHECK-NEXT: cmpq 8(%rsp), %rcx
+; CHECK-NEXT: jne .LBB0_12
+; CHECK-NEXT: # %bb.10: # %SP_return9
+; CHECK-NEXT: movq %rax, %rdi
+; CHECK-NEXT: callq __clang_call_terminate
+
+; CHECK: .LBB0_12: # %CallStackCheckFailBlk
+; CHECK-NEXT: callq __stack_chk_fail@PLT
+
+unreachable: ; preds = %entry
+ unreachable
+}
+
+; Function Attrs: nofree
+declare dso_local noalias i8* @__cxa_allocate_exception(i64) local_unnamed_addr #1
+
+; Function Attrs: nofree noreturn
+declare dso_local void @__cxa_throw(i8*, i8*, i8*) local_unnamed_addr #2
+
+declare dso_local i32 @__gxx_personality_v0(...)
+
+; Function Attrs: nofree
+declare dso_local i8* @__cxa_begin_catch(i8*) local_unnamed_addr #1
+
+; Function Attrs: nofree
+declare dso_local noundef i64 @write(i32 noundef, i8* nocapture noundef readonly, i64 noundef) local_unnamed_addr #3
+
+; Function Attrs: nofree noreturn
+declare dso_local void @_exit(i32 noundef) local_unnamed_addr #4
+
+; Function Attrs: nofree
+declare dso_local void @__cxa_end_catch() local_unnamed_addr #1
+
+; Function Attrs: noinline noreturn nounwind
+define linkonce_odr hidden void @__clang_call_terminate(i8* %0) local_unnamed_addr #5 comdat {
+ %2 = tail call i8* @__cxa_begin_catch(i8* %0) #8
+ tail call void @_ZSt9terminatev() #10
+ unreachable
+}
+
+; Function Attrs: nofree noreturn nounwind
+declare dso_local void @_ZSt9terminatev() local_unnamed_addr #6
+
+; Function Attrs: mustprogress nofree sspreq uwtable
+define dso_local void @_Z4vulni(i32 noundef %op) local_unnamed_addr #7 {
+; CHECK-LABEL: _Z4vulni:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: movq %fs:40, %rax
+; CHECK-NEXT: movq %rax, (%rsp)
+; CHECK-NEXT: cmpl $1, %edi
+; CHECK-NEXT: je .LBB2_1
+; CHECK-NEXT: # %bb.3: # %if.end
+; CHECK-NEXT: movq %fs:40, %rax
+; CHECK-NEXT: cmpq (%rsp), %rax
+; CHECK-NEXT: jne .LBB2_5
+; CHECK-NEXT: # %bb.4: # %SP_return3
+; CHECK-NEXT: popq %rax
+; CHECK-NEXT: .cfi_def_cfa_offset 8
+; CHECK-NEXT: retq
+; CHECK-NEXT: .LBB2_1: # %if.then
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: movl $4, %edi
+; CHECK-NEXT: callq __cxa_allocate_exception
+; CHECK-NEXT: movl $1, (%rax)
+; CHECK-NEXT: movq %fs:40, %rcx
+; CHECK-NEXT: cmpq (%rsp), %rcx
+; CHECK-NEXT: jne .LBB2_5
+; CHECK-NEXT: # %bb.2: # %SP_return
+; CHECK-NEXT: movl $_ZTIi, %esi
+; CHECK-NEXT: movq %rax, %rdi
+; CHECK-NEXT: xorl %edx, %edx
+; CHECK-NEXT: callq __cxa_throw
+; CHECK-NEXT: .LBB2_5: # %CallStackCheckFailBlk2
+; CHECK-NEXT: callq __stack_chk_fail at PLT
+entry:
+ %cmp = icmp eq i32 %op, 1
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ %exception = tail call i8* @__cxa_allocate_exception(i64 4) #8
+ %0 = bitcast i8* %exception to i32*
+ store i32 1, i32* %0, align 16
+ tail call void @__cxa_throw(i8* %exception, i8* bitcast (i8** @_ZTIi to i8*), i8* null) #9
+ unreachable
+
+if.end: ; preds = %entry
+ ret void
+}
+
+attributes #0 = { mustprogress noreturn sspreq uwtable }
+attributes #1 = { nofree }
+attributes #2 = { nofree noreturn }
+attributes #3 = { nofree }
+attributes #4 = { nofree noreturn }
+attributes #5 = { noinline noreturn nounwind }
+attributes #6 = { nofree noreturn nounwind }
+attributes #7 = { mustprogress nofree sspreq uwtable }
+attributes #8 = { nounwind }
+attributes #9 = { noreturn }
+attributes #10 = { noreturn nounwind }
More information about the llvm-commits
mailing list