[clang] e8bce83 - [X86] Enable compilation of user interrupt handlers.
via cfe-commits
cfe-commits@lists.llvm.org
Thu Apr 22 20:44:10 PDT 2021
Author: Wang, Pengfei
Date: 2021-04-23T11:43:57+08:00
New Revision: e8bce83996313ed3f4f5fce43107530d49fc3b64
URL: https://github.com/llvm/llvm-project/commit/e8bce83996313ed3f4f5fce43107530d49fc3b64
DIFF: https://github.com/llvm/llvm-project/commit/e8bce83996313ed3f4f5fce43107530d49fc3b64.diff
LOG: [X86] Enable compilation of user interrupt handlers.
Add __uintr_frame structure and use UIRET instruction for functions with
x86 interrupt calling convention when UINTR is present.
Reviewed By: LuoYuanke
Differential Revision: https://reviews.llvm.org/D99708
Added:
llvm/test/CodeGen/X86/x86-64-intrcc-uintr.ll
Modified:
clang/lib/Headers/uintrintrin.h
llvm/lib/Target/X86/X86ExpandPseudo.cpp
Removed:
################################################################################
diff --git a/clang/lib/Headers/uintrintrin.h b/clang/lib/Headers/uintrintrin.h
index 78aa8779c325..e3839dcebe1e 100644
--- a/clang/lib/Headers/uintrintrin.h
+++ b/clang/lib/Headers/uintrintrin.h
@@ -20,6 +20,13 @@
#ifdef __x86_64__
+struct __uintr_frame
+{
+ unsigned long long rip;
+ unsigned long long rflags;
+ unsigned long long rsp;
+};
+
/// Clears the user interrupt flag (UIF). Its effect takes place immediately: a
/// user interrupt cannot be delivered on the instruction boundary following
/// CLUI. Can be executed only if CR4.UINT = 1, the logical processor is in
diff --git a/llvm/lib/Target/X86/X86ExpandPseudo.cpp b/llvm/lib/Target/X86/X86ExpandPseudo.cpp
index e0875fb6432d..29a7b9840f44 100644
--- a/llvm/lib/Target/X86/X86ExpandPseudo.cpp
+++ b/llvm/lib/Target/X86/X86ExpandPseudo.cpp
@@ -23,6 +23,7 @@
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/Passes.h" // For IDs of passes that are preserved.
#include "llvm/IR/GlobalValue.h"
+#include "llvm/Target/TargetMachine.h"
using namespace llvm;
#define DEBUG_TYPE "x86-pseudo"
@@ -315,8 +316,12 @@ bool X86ExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
int64_t StackAdj = MBBI->getOperand(0).getImm();
X86FL->emitSPUpdate(MBB, MBBI, DL, StackAdj, true);
// Replace pseudo with machine iret
- BuildMI(MBB, MBBI, DL,
- TII->get(STI->is64Bit() ? X86::IRET64 : X86::IRET32));
+ unsigned RetOp = STI->is64Bit() ? X86::IRET64 : X86::IRET32;
+ // Use UIRET if UINTR is present (except for building kernel)
+ if (STI->is64Bit() && STI->hasUINTR() &&
+ MBB.getParent()->getTarget().getCodeModel() != CodeModel::Kernel)
+ RetOp = X86::UIRET;
+ BuildMI(MBB, MBBI, DL, TII->get(RetOp));
MBB.erase(MBBI);
return true;
}
diff --git a/llvm/test/CodeGen/X86/x86-64-intrcc-uintr.ll b/llvm/test/CodeGen/X86/x86-64-intrcc-uintr.ll
new file mode 100644
index 000000000000..837b0782004c
--- /dev/null
+++ b/llvm/test/CodeGen/X86/x86-64-intrcc-uintr.ll
@@ -0,0 +1,171 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --no_x86_scrub_sp --no_x86_scrub_rip
+; RUN: llc < %s | FileCheck %s -check-prefixes=CHECK-USER
+; RUN: llc -O0 < %s | FileCheck %s -check-prefixes=CHECK0-USER
+; RUN: llc -code-model=kernel < %s | FileCheck %s -check-prefixes=CHECK-KERNEL
+; RUN: llc -O0 -code-model=kernel < %s | FileCheck %s -check-prefixes=CHECK0-KERNEL
+
+target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+%struct.__uintr_frame = type { i64, i64, i64 }
+
+; #include <x86gprintrin.h>
+;
+; void
+; __attribute__ ((interrupt))
+; test_uintr_isr_cc_empty(struct __uintr_frame *frame, unsigned long long uirrv)
+; {
+; }
+
+define dso_local x86_intrcc void @test_uintr_isr_cc_empty(%struct.__uintr_frame* nocapture byval(%struct.__uintr_frame) %frame, i64 %uirrv) #0 {
+; CHECK-USER-LABEL: test_uintr_isr_cc_empty:
+; CHECK-USER: # %bb.0: # %entry
+; CHECK-USER-NEXT: pushq %rax
+; CHECK-USER-NEXT: cld
+; CHECK-USER-NEXT: addq $16, %rsp
+; CHECK-USER-NEXT: uiret
+;
+; CHECK0-USER-LABEL: test_uintr_isr_cc_empty:
+; CHECK0-USER: # %bb.0: # %entry
+; CHECK0-USER-NEXT: pushq %rax
+; CHECK0-USER-NEXT: cld
+; CHECK0-USER-NEXT: addq $16, %rsp
+; CHECK0-USER-NEXT: uiret
+;
+; CHECK-KERNEL-LABEL: test_uintr_isr_cc_empty:
+; CHECK-KERNEL: # %bb.0: # %entry
+; CHECK-KERNEL-NEXT: pushq %rax
+; CHECK-KERNEL-NEXT: cld
+; CHECK-KERNEL-NEXT: addq $16, %rsp
+; CHECK-KERNEL-NEXT: iretq
+;
+; CHECK0-KERNEL-LABEL: test_uintr_isr_cc_empty:
+; CHECK0-KERNEL: # %bb.0: # %entry
+; CHECK0-KERNEL-NEXT: pushq %rax
+; CHECK0-KERNEL-NEXT: cld
+; CHECK0-KERNEL-NEXT: addq $16, %rsp
+; CHECK0-KERNEL-NEXT: iretq
+entry:
+ ret void
+}
+
+; unsigned long long g_rip;
+; unsigned long long g_rflags;
+; unsigned long long g_rsp;
+; unsigned long long g_uirrv;
+;
+; void
+; __attribute__((interrupt))
+; test_uintr_isr_cc_args(struct __uintr_frame *frame, unsigned long long uirrv)
+; {
+; g_rip = frame->rip;
+; g_rflags = frame->rflags;
+; g_rsp = frame->rsp;
+; g_uirrv = uirrv;
+; }
+@g_rip = dso_local local_unnamed_addr global i64 0, align 8
+@g_rflags = dso_local local_unnamed_addr global i64 0, align 8
+@g_rsp = dso_local local_unnamed_addr global i64 0, align 8
+@g_uirrv = dso_local local_unnamed_addr global i64 0, align 8
+
+define dso_local x86_intrcc void @test_uintr_isr_cc_args(%struct.__uintr_frame* nocapture readonly byval(%struct.__uintr_frame) %frame, i64 %uirrv) #0 {
+; CHECK-USER-LABEL: test_uintr_isr_cc_args:
+; CHECK-USER: # %bb.0: # %entry
+; CHECK-USER-NEXT: pushq %rax
+; CHECK-USER-NEXT: pushq %rax
+; CHECK-USER-NEXT: pushq %rdx
+; CHECK-USER-NEXT: pushq %rcx
+; CHECK-USER-NEXT: cld
+; CHECK-USER-NEXT: movq 32(%rsp), %rax
+; CHECK-USER-NEXT: movq 40(%rsp), %rcx
+; CHECK-USER-NEXT: movq 48(%rsp), %rdx
+; CHECK-USER-NEXT: movq %rcx, g_rip(%rip)
+; CHECK-USER-NEXT: movq %rdx, g_rflags(%rip)
+; CHECK-USER-NEXT: movq 56(%rsp), %rcx
+; CHECK-USER-NEXT: movq %rcx, g_rsp(%rip)
+; CHECK-USER-NEXT: movq %rax, g_uirrv(%rip)
+; CHECK-USER-NEXT: popq %rcx
+; CHECK-USER-NEXT: popq %rdx
+; CHECK-USER-NEXT: popq %rax
+; CHECK-USER-NEXT: addq $16, %rsp
+; CHECK-USER-NEXT: uiret
+;
+; CHECK0-USER-LABEL: test_uintr_isr_cc_args:
+; CHECK0-USER: # %bb.0: # %entry
+; CHECK0-USER-NEXT: pushq %rax
+; CHECK0-USER-NEXT: pushq %rax
+; CHECK0-USER-NEXT: pushq %rdx
+; CHECK0-USER-NEXT: pushq %rcx
+; CHECK0-USER-NEXT: cld
+; CHECK0-USER-NEXT: movq 32(%rsp), %rax
+; CHECK0-USER-NEXT: leaq 40(%rsp), %rcx
+; CHECK0-USER-NEXT: movq (%rcx), %rdx
+; CHECK0-USER-NEXT: movq %rdx, g_rip(%rip)
+; CHECK0-USER-NEXT: movq 8(%rcx), %rdx
+; CHECK0-USER-NEXT: movq %rdx, g_rflags(%rip)
+; CHECK0-USER-NEXT: movq 16(%rcx), %rcx
+; CHECK0-USER-NEXT: movq %rcx, g_rsp(%rip)
+; CHECK0-USER-NEXT: movq %rax, g_uirrv(%rip)
+; CHECK0-USER-NEXT: popq %rcx
+; CHECK0-USER-NEXT: popq %rdx
+; CHECK0-USER-NEXT: popq %rax
+; CHECK0-USER-NEXT: addq $16, %rsp
+; CHECK0-USER-NEXT: uiret
+;
+; CHECK-KERNEL-LABEL: test_uintr_isr_cc_args:
+; CHECK-KERNEL: # %bb.0: # %entry
+; CHECK-KERNEL-NEXT: pushq %rax
+; CHECK-KERNEL-NEXT: pushq %rax
+; CHECK-KERNEL-NEXT: pushq %rdx
+; CHECK-KERNEL-NEXT: pushq %rcx
+; CHECK-KERNEL-NEXT: cld
+; CHECK-KERNEL-NEXT: movq 32(%rsp), %rax
+; CHECK-KERNEL-NEXT: movq 40(%rsp), %rcx
+; CHECK-KERNEL-NEXT: movq 48(%rsp), %rdx
+; CHECK-KERNEL-NEXT: movq %rcx, g_rip(%rip)
+; CHECK-KERNEL-NEXT: movq %rdx, g_rflags(%rip)
+; CHECK-KERNEL-NEXT: movq 56(%rsp), %rcx
+; CHECK-KERNEL-NEXT: movq %rcx, g_rsp(%rip)
+; CHECK-KERNEL-NEXT: movq %rax, g_uirrv(%rip)
+; CHECK-KERNEL-NEXT: popq %rcx
+; CHECK-KERNEL-NEXT: popq %rdx
+; CHECK-KERNEL-NEXT: popq %rax
+; CHECK-KERNEL-NEXT: addq $16, %rsp
+; CHECK-KERNEL-NEXT: iretq
+;
+; CHECK0-KERNEL-LABEL: test_uintr_isr_cc_args:
+; CHECK0-KERNEL: # %bb.0: # %entry
+; CHECK0-KERNEL-NEXT: pushq %rax
+; CHECK0-KERNEL-NEXT: pushq %rax
+; CHECK0-KERNEL-NEXT: pushq %rdx
+; CHECK0-KERNEL-NEXT: pushq %rcx
+; CHECK0-KERNEL-NEXT: cld
+; CHECK0-KERNEL-NEXT: movq 32(%rsp), %rax
+; CHECK0-KERNEL-NEXT: leaq 40(%rsp), %rcx
+; CHECK0-KERNEL-NEXT: movq (%rcx), %rdx
+; CHECK0-KERNEL-NEXT: movq %rdx, g_rip(%rip)
+; CHECK0-KERNEL-NEXT: movq 8(%rcx), %rdx
+; CHECK0-KERNEL-NEXT: movq %rdx, g_rflags(%rip)
+; CHECK0-KERNEL-NEXT: movq 16(%rcx), %rcx
+; CHECK0-KERNEL-NEXT: movq %rcx, g_rsp(%rip)
+; CHECK0-KERNEL-NEXT: movq %rax, g_uirrv(%rip)
+; CHECK0-KERNEL-NEXT: popq %rcx
+; CHECK0-KERNEL-NEXT: popq %rdx
+; CHECK0-KERNEL-NEXT: popq %rax
+; CHECK0-KERNEL-NEXT: addq $16, %rsp
+; CHECK0-KERNEL-NEXT: iretq
+entry:
+ %rip = getelementptr inbounds %struct.__uintr_frame, %struct.__uintr_frame* %frame, i64 0, i32 0
+ %0 = load i64, i64* %rip, align 8
+ store i64 %0, i64* @g_rip, align 8
+ %rflags = getelementptr inbounds %struct.__uintr_frame, %struct.__uintr_frame* %frame, i64 0, i32 1
+ %1 = load i64, i64* %rflags, align 8
+ store i64 %1, i64* @g_rflags, align 8
+ %rsp = getelementptr inbounds %struct.__uintr_frame, %struct.__uintr_frame* %frame, i64 0, i32 2
+ %2 = load i64, i64* %rsp, align 8
+ store i64 %2, i64* @g_rsp, align 8
+ store i64 %uirrv, i64* @g_uirrv, align 8
+ ret void
+}
+
+attributes #0 = { nofree norecurse nounwind willreturn "disable-tail-calls"="true" "frame-pointer"="none" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+uintr" "tune-cpu"="generic" }
More information about the cfe-commits
mailing list