[llvm] r368923 - [AArch64][GlobalISel] Custom selection for s8 load acquire.
Amara Emerson via llvm-commits
llvm-commits at lists.llvm.org
Wed Aug 14 14:30:31 PDT 2019
Author: aemerson
Date: Wed Aug 14 14:30:30 2019
New Revision: 368923
URL: http://llvm.org/viewvc/llvm-project?rev=368923&view=rev
Log:
[AArch64][GlobalISel] Custom selection for s8 load acquire.
Implement selection for this single atomic load instruction so that we can
compile stack protector code.
Differential Revision: https://reviews.llvm.org/D66245
Added:
llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-atomic-load-store.mir
Modified:
llvm/trunk/lib/Target/AArch64/AArch64InstructionSelector.cpp
Modified: llvm/trunk/lib/Target/AArch64/AArch64InstructionSelector.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64InstructionSelector.cpp?rev=368923&r1=368922&r2=368923&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64InstructionSelector.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64InstructionSelector.cpp Wed Aug 14 14:30:30 2019
@@ -1740,7 +1740,14 @@ bool AArch64InstructionSelector::select(
auto &MemOp = **I.memoperands_begin();
if (MemOp.getOrdering() != AtomicOrdering::NotAtomic) {
- LLVM_DEBUG(dbgs() << "Atomic load/store not supported yet\n");
+ // For now we just support s8 acquire loads to be able to compile stack
+ // protector code.
+ if (MemOp.getOrdering() == AtomicOrdering::Acquire &&
+ MemOp.getSize() == 1) {
+ I.setDesc(TII.get(AArch64::LDARB));
+ return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
+ }
+ LLVM_DEBUG(dbgs() << "Atomic load/store not fully supported yet\n");
return false;
}
unsigned MemSizeInBits = MemOp.getSize() * 8;
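For readers unfamiliar with GlobalISel's manual selection path: I.setDesc()
retargets the generic G_LOAD to a concrete opcode in place, keeping its
operands and its memory operand, and constrainSelectedInstRegOperands() then
pins the virtual registers to the register classes LDARB demands. Below is a
minimal sketch of how the same idiom could later cover the wider acquire
loads (the helper name and every case beyond the 1-byte one are hypothetical
and not part of this commit):

  // Illustrative only: this patch implements just the 1-byte case. A later
  // extension could map the remaining widths onto the other AArch64
  // acquire-load opcodes and reuse the same setDesc/constrain idiom.
  static unsigned selectAcquireLoadOpcode(unsigned SizeInBytes) {
    switch (SizeInBytes) {
    case 1:
      return AArch64::LDARB; // implemented by this patch
    case 2:
      return AArch64::LDARH; // hypothetical extension
    case 4:
      return AArch64::LDARW; // hypothetical extension
    case 8:
      return AArch64::LDARX; // hypothetical extension
    default:
      return 0; // no single-instruction acquire load; caller bails out
    }
  }

Mutating the existing instruction with setDesc() rather than building a new
one is what preserves the '(load acquire 1 from %ir.ptr, align 8)' memory
operand seen in the test output below.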
Added: llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-atomic-load-store.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-atomic-load-store.mir?rev=368923&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-atomic-load-store.mir (added)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-atomic-load-store.mir Wed Aug 14 14:30:30 2019
@@ -0,0 +1,37 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=aarch64-- -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
+--- |
+ target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
+ target triple = "aarch64"
+
+ define i8 @load_acq_i8(i8* %ptr) {
+ %v = load atomic i8, i8* %ptr acquire, align 8
+ ret i8 %v
+ }
+
+...
+---
+name: load_acq_i8
+alignment: 2
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+liveins:
+ - { reg: '$x0' }
+machineFunctionInfo: {}
+body: |
+ bb.1:
+ liveins: $x0
+
+ ; CHECK-LABEL: name: load_acq_i8
+ ; CHECK: liveins: $x0
+ ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+ ; CHECK: [[LDARB:%[0-9]+]]:gpr32 = LDARB [[COPY]] :: (load acquire 1 from %ir.ptr, align 8)
+ ; CHECK: $w0 = COPY [[LDARB]]
+ ; CHECK: RET_ReallyLR implicit $w0
+ %0:gpr(p0) = COPY $x0
+ %2:gpr(s32) = G_LOAD %0(p0) :: (load acquire 1 from %ir.ptr, align 8)
+ $w0 = COPY %2(s32)
+ RET_ReallyLR implicit $w0
+
+...
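A note on the expected output above: before selection, %0:gpr(p0) carries
only a register bank, and constrainSelectedInstRegOperands() narrows it to
the GPR64sp register class that LDARB's address operand requires, which is
why the CHECK lines show a gpr64sp COPY feeding the LDARB.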