[llvm] r324892 - [AArch64] Improve v8.1-A code-gen for atomic load-subtract

Oliver Stannard via llvm-commits llvm-commits at lists.llvm.org
Mon Feb 12 06:22:03 PST 2018


Author: olista01
Date: Mon Feb 12 06:22:03 2018
New Revision: 324892

URL: http://llvm.org/viewvc/llvm-project?rev=324892&view=rev
Log:
[AArch64] Improve v8.1-A code-gen for atomic load-subtract

Armv8.1-A added an atomic load-add instruction, but not a load-subtract
instruction. Our current code-generation for atomic load-subtract always
inserts a NEG instruction to negate its argument, even when the negation
could be folded into a constant or another instruction.

This adds custom lowering early in SelectionDAG to convert an atomic
load-subtract operation into a negation followed by an atomic load-add,
allowing the normal DAG optimisations to work on it.
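
For example (an illustrative sketch, not part of the patch: the function and
variable names are made up and the exact register allocation may differ; see
the new tests below), subtracting a constant no longer needs a run-time
negation:

  #include <atomic>

  std::atomic<int> Counter;

  // With LSE (Armv8.1-A) atomics, the negation is folded into the constant
  // at compile time, so subtracting -1 becomes a plain atomic load-add of
  // +1, roughly:
  //
  //   orr     w8, wzr, #0x1      // materialise the negated constant (+1)
  //   ldaddal w8, w0, [x9]       // one atomic load-add, no NEG
  //
  // Previously a NEG was emitted to negate the operand at run time.
  int fetchSubMinusOne() {
    return Counter.fetch_sub(-1, std::memory_order_seq_cst);
  }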

I've left the old TableGen patterns in because they are still needed for
GlobalISel.

Some of the tests in this patch are copied from D35375 (by Chad Rosier),
which was abandoned.
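
The _neg_arg tests added below cover the other folding opportunity: when the
operand is itself a negation, the two negations cancel and the incoming
argument feeds the load-add directly. A hypothetical sketch of that case
(again, the names are made up):

  #include <atomic>

  std::atomic<long> Total;

  // The explicit negation and the load-subtract's implicit negation cancel,
  // so the original argument is passed straight to the atomic load-add,
  // roughly:
  //
  //   ldaddal x0, x8, [x9]       // no NEG, no extra constant materialisation
  long fetchSubNegated(long Offset) {
    return Total.fetch_sub(-Offset, std::memory_order_seq_cst);
  }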

Differential revision: https://reviews.llvm.org/D42477


Modified:
    llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp
    llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.h
    llvm/trunk/test/CodeGen/AArch64/atomic-ops-lse.ll

Modified: llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp?rev=324892&r1=324891&r2=324892&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp Mon Feb 12 06:22:03 2018
@@ -464,6 +464,8 @@ AArch64TargetLowering::AArch64TargetLowe
   setOperationAction(ISD::PREFETCH, MVT::Other, Custom);
 
   setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i128, Custom);
+  setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Custom);
+  setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i64, Custom);
 
   // Lower READCYCLECOUNTER using an mrs from PMCCNTR_EL0.
   // This requires the Performance Monitors extension.
@@ -2679,6 +2681,8 @@ SDValue AArch64TargetLowering::LowerOper
   case ISD::VECREDUCE_FMAX:
   case ISD::VECREDUCE_FMIN:
     return LowerVECREDUCE(Op, DAG);
+  case ISD::ATOMIC_LOAD_SUB:
+    return LowerATOMIC_LOAD_SUB(Op, DAG);
   }
 }
 
@@ -7373,6 +7377,23 @@ SDValue AArch64TargetLowering::LowerVECR
   }
 }
 
+SDValue AArch64TargetLowering::LowerATOMIC_LOAD_SUB(SDValue Op,
+                                                    SelectionDAG &DAG) const {
+  auto &Subtarget = static_cast<const AArch64Subtarget &>(DAG.getSubtarget());
+  if (!Subtarget.hasLSE())
+    return SDValue();
+
+  // LSE has an atomic load-add instruction, but not a load-sub.
+  SDLoc dl(Op);
+  MVT VT = Op.getSimpleValueType();
+  SDValue RHS = Op.getOperand(2);
+  AtomicSDNode *AN = cast<AtomicSDNode>(Op.getNode());
+  RHS = DAG.getNode(ISD::SUB, dl, VT, DAG.getConstant(0, dl, VT), RHS);
+  return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, dl, AN->getMemoryVT(),
+                       Op.getOperand(0), Op.getOperand(1), RHS,
+                       AN->getMemOperand());
+}
+
 /// getTgtMemIntrinsic - Represent NEON load and store intrinsics as
 /// MemIntrinsicNodes.  The associated MachineMemOperands record the alignment
 /// specified in the intrinsic calls.

Modified: llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.h?rev=324892&r1=324891&r2=324892&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.h (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.h Mon Feb 12 06:22:03 2018
@@ -596,6 +596,7 @@ private:
   SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerVECREDUCE(SDValue Op, SelectionDAG &DAG) const;
+  SDValue LowerATOMIC_LOAD_SUB(SDValue Op, SelectionDAG &DAG) const;
 
   SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                         std::vector<SDNode *> *Created) const override;

Modified: llvm/trunk/test/CodeGen/AArch64/atomic-ops-lse.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/atomic-ops-lse.ll?rev=324892&r1=324891&r2=324892&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/atomic-ops-lse.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/atomic-ops-lse.ll Mon Feb 12 06:22:03 2018
@@ -814,6 +814,118 @@ define void @test_atomic_load_sub_i64_no
   ret void
 }
 
+define i8 @test_atomic_load_sub_i8_neg_imm() nounwind {
+; CHECK-LABEL: test_atomic_load_sub_i8_neg_imm:
+  %old = atomicrmw sub i8* @var8, i8 -1 seq_cst
+
+; CHECK-NOT: dmb
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
+; CHECK: orr w[[IMM:[0-9]+]], wzr, #0x1
+; CHECK: ldaddalb w[[IMM]], w[[NEW:[0-9]+]], [x[[ADDR]]]
+; CHECK-NOT: dmb
+
+  ret i8 %old
+}
+
+define i16 @test_atomic_load_sub_i16_neg_imm() nounwind {
+; CHECK-LABEL: test_atomic_load_sub_i16_neg_imm:
+  %old = atomicrmw sub i16* @var16, i16 -1 seq_cst
+
+; CHECK-NOT: dmb
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
+; CHECK: orr w[[IMM:[0-9]+]], wzr, #0x1
+; CHECK: ldaddalh w[[IMM]], w[[NEW:[0-9]+]], [x[[ADDR]]]
+; CHECK-NOT: dmb
+
+  ret i16 %old
+}
+
+define i32 @test_atomic_load_sub_i32_neg_imm() nounwind {
+; CHECK-LABEL: test_atomic_load_sub_i32_neg_imm:
+  %old = atomicrmw sub i32* @var32, i32 -1 seq_cst
+
+; CHECK-NOT: dmb
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
+; CHECK: orr w[[IMM:[0-9]+]], wzr, #0x1
+; CHECK: ldaddal w[[IMM]], w[[NEW:[0-9]+]], [x[[ADDR]]]
+; CHECK-NOT: dmb
+
+  ret i32 %old
+}
+
+define i64 @test_atomic_load_sub_i64_neg_imm() nounwind {
+; CHECK-LABEL: test_atomic_load_sub_i64_neg_imm:
+  %old = atomicrmw sub i64* @var64, i64 -1 seq_cst
+
+; CHECK-NOT: dmb
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
+; CHECK: orr w[[IMM:[0-9]+]], wzr, #0x1
+; CHECK: ldaddal x[[IMM]], x[[NEW:[0-9]+]], [x[[ADDR]]]
+; CHECK-NOT: dmb
+
+  ret i64 %old
+}
+
+define i8 @test_atomic_load_sub_i8_neg_arg(i8 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_sub_i8_neg_arg:
+  %neg = sub i8 0, %offset
+  %old = atomicrmw sub i8* @var8, i8 %neg seq_cst
+
+; CHECK-NOT: dmb
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
+; CHECK: ldaddalb w0, w[[NEW:[0-9]+]], [x[[ADDR]]]
+; CHECK-NOT: dmb
+
+  ret i8 %old
+}
+
+define i16 @test_atomic_load_sub_i16_neg_arg(i16 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_sub_i16_neg_arg:
+  %neg = sub i16 0, %offset
+  %old = atomicrmw sub i16* @var16, i16 %neg seq_cst
+
+; CHECK-NOT: dmb
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
+; CHECK: ldaddalh w0, w[[NEW:[0-9]+]], [x[[ADDR]]]
+; CHECK-NOT: dmb
+
+  ret i16 %old
+}
+
+define i32 @test_atomic_load_sub_i32_neg_arg(i32 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_sub_i32_neg_arg:
+  %neg = sub i32 0, %offset
+  %old = atomicrmw sub i32* @var32, i32 %neg seq_cst
+
+; CHECK-NOT: dmb
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
+; CHECK: ldaddal w0, w[[NEW:[0-9]+]], [x[[ADDR]]]
+; CHECK-NOT: dmb
+
+  ret i32 %old
+}
+
+define i64 @test_atomic_load_sub_i64_neg_arg(i64 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_sub_i64_neg_arg:
+  %neg = sub i64 0, %offset
+  %old = atomicrmw sub i64* @var64, i64 %neg seq_cst
+
+; CHECK-NOT: dmb
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
+; CHECK: ldaddal x0, x[[NEW:[0-9]+]], [x[[ADDR]]]
+; CHECK-NOT: dmb
+
+  ret i64 %old
+}
+
 define i8 @test_atomic_load_and_i8(i8 %offset) nounwind {
 ; CHECK-LABEL: test_atomic_load_and_i8:
   %old = atomicrmw and i8* @var8, i8 %offset seq_cst



