[llvm] fb7c380 - [AArch64][ISel] Always use pre-inc/post-inc addressing mode for auto-indexed load/store with constant offset.

Huihui Zhang via llvm-commits llvm-commits at lists.llvm.org
Thu Feb 16 16:19:22 PST 2023


Author: Huihui Zhang
Date: 2023-02-16T16:19:09-08:00
New Revision: fb7c38073e800ec35f2d54d79630c68a4b901b4c

URL: https://github.com/llvm/llvm-project/commit/fb7c38073e800ec35f2d54d79630c68a4b901b4c
DIFF: https://github.com/llvm/llvm-project/commit/fb7c38073e800ec35f2d54d79630c68a4b901b4c.diff

LOG: [AArch64][ISel] Always use pre-inc/post-inc addressing mode for auto-indexed load/store with constant offset.

Unlike the ARM target, the AArch64 target currently has no facility to encode
the operation bit, i.e. whether to add an offset to the base pointer for the
pre-inc/post-inc addressing modes, or to subtract an offset from the base
pointer for the pre-dec/post-dec addressing modes.
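
For illustration only (this snippet is not part of the patch): the AArch64
pre/post-indexed load/store encodings carry a single signed 9-bit immediate
(simm9) and no separate add/subtract bit, so a "pre-decrement" is simply a
pre-increment by a negative offset:

    str wzr, [x8, #32]!    // pre-index: x8 += 32, then store to [x8]
    str wzr, [x8, #-32]!   // "pre-decrement": same encoding, negative simm9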

A miscompile (https://github.com/llvm/llvm-project/issues/60645) was observed
as a result of this limitation.

Therefore, for AArch64 auto-indexed loads/stores with a constant offset,
always use the pre-inc/post-inc addressing mode; the constant offset is
negated for what would otherwise be a pre-dec/post-dec access. An
auto-indexed address with a non-constant offset is currently not split into
base and offset parts. If non-constant offsets are to be handled in the
future, the offset node will need to be negated as well.
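
As a rough sketch (condensed from the patch below, with the surrounding
opcode and addressing-legality checks elided), the constant-offset path now
materializes a possibly-negative offset node instead of reporting an IsInc
flag back to the callers:

    int64_t RHSC = RHS->getSExtValue();
    if (Op->getOpcode() == ISD::SUB)
      RHSC = -(uint64_t)RHSC;      // fold the SUB into a negative offset
    if (!isInt<9>(RHSC))           // must fit the signed 9-bit immediate
      return false;
    Offset = DAG.getConstant(RHSC, SDLoc(N), RHS->getValueType(0));
    return true;

The callers then unconditionally select ISD::PRE_INC or ISD::POST_INC.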

Reviewed By: efriedma

Differential Revision: https://reviews.llvm.org/D143796

Added: 
    llvm/test/CodeGen/AArch64/pre-indexed-addrmode-with-constant-offset.ll

Modified: 
    llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
    llvm/lib/Target/AArch64/AArch64ISelLowering.h

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 17227cf03f0f8..c69b164ef1218 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -21899,9 +21899,10 @@ bool AArch64TargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
   return CI->isTailCall();
 }
 
-bool AArch64TargetLowering::getIndexedAddressParts(
-    SDNode *N, SDNode *Op, SDValue &Base, SDValue &Offset,
-    ISD::MemIndexedMode &AM, bool &IsInc, SelectionDAG &DAG) const {
+bool AArch64TargetLowering::getIndexedAddressParts(SDNode *N, SDNode *Op,
+                                                   SDValue &Base,
+                                                   SDValue &Offset,
+                                                   SelectionDAG &DAG) const {
   if (Op->getOpcode() != ISD::ADD && Op->getOpcode() != ISD::SUB)
     return false;
 
@@ -21940,8 +21941,9 @@ bool AArch64TargetLowering::getIndexedAddressParts(
       RHSC = -(uint64_t)RHSC;
     if (!isInt<9>(RHSC))
       return false;
-    IsInc = (Op->getOpcode() == ISD::ADD);
-    Offset = Op->getOperand(1);
+    // Always emit pre-inc/post-inc addressing mode. Use negated constant offset
+    // when dealing with subtraction.
+    Offset = DAG.getConstant(RHSC, SDLoc(N), RHS->getValueType(0));
     return true;
   }
   return false;
@@ -21962,10 +21964,9 @@ bool AArch64TargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
   } else
     return false;
 
-  bool IsInc;
-  if (!getIndexedAddressParts(N, Ptr.getNode(), Base, Offset, AM, IsInc, DAG))
+  if (!getIndexedAddressParts(N, Ptr.getNode(), Base, Offset, DAG))
     return false;
-  AM = IsInc ? ISD::PRE_INC : ISD::PRE_DEC;
+  AM = ISD::PRE_INC;
   return true;
 }
 
@@ -21983,14 +21984,13 @@ bool AArch64TargetLowering::getPostIndexedAddressParts(
   } else
     return false;
 
-  bool IsInc;
-  if (!getIndexedAddressParts(N, Op, Base, Offset, AM, IsInc, DAG))
+  if (!getIndexedAddressParts(N, Op, Base, Offset, DAG))
     return false;
   // Post-indexing updates the base, so it's not a valid transform
   // if that's not the same as the load's pointer.
   if (Ptr != Base)
     return false;
-  AM = IsInc ? ISD::POST_INC : ISD::POST_DEC;
+  AM = ISD::POST_INC;
   return true;
 }
 

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index 163e736718596..6db1ce4022fb9 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -1167,8 +1167,7 @@ class AArch64TargetLowering : public TargetLowering {
   bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
   bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
   bool getIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
-                              SDValue &Offset, ISD::MemIndexedMode &AM,
-                              bool &IsInc, SelectionDAG &DAG) const;
+                              SDValue &Offset, SelectionDAG &DAG) const;
   bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset,
                                  ISD::MemIndexedMode &AM,
                                  SelectionDAG &DAG) const override;

diff --git a/llvm/test/CodeGen/AArch64/pre-indexed-addrmode-with-constant-offset.ll b/llvm/test/CodeGen/AArch64/pre-indexed-addrmode-with-constant-offset.ll
new file mode 100644
index 0000000000000..c93864cf73d2c
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/pre-indexed-addrmode-with-constant-offset.ll
@@ -0,0 +1,24 @@
+; RUN: llc -mtriple=aarch64-linux-gnu < %s | FileCheck %s
+
+; Reduced test case from https://github.com/llvm/llvm-project/issues/60645.
+; Check that we generate -32 as the offset of the first store.
+
+define i8* @pr60645(i8* %ptr, i64 %t0) {
+; CHECK-LABEL: pr60645:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub x8, x0, x1, lsl #2
+; CHECK-NEXT:    str wzr, [x8, #-32]!
+; CHECK-NEXT:    stur wzr, [x8, #-8]
+; CHECK-NEXT:    ret
+  %t1 = add nuw nsw i64 %t0, 8
+  %t2 = mul i64 %t1, -4
+  %t3 = getelementptr i8, i8* %ptr, i64 %t2
+  %t4 = bitcast i8* %t3 to i32*
+  store i32 0, i32* %t4, align 4
+  %t5 = shl i64 %t1, 2
+  %t6 = sub nuw nsw i64 -8, %t5
+  %t7 = getelementptr i8, i8* %ptr, i64 %t6
+  %t8 = bitcast i8* %t7 to i32*
+  store i32 0, i32* %t8, align 4
+  ret i8* %ptr
+}


More information about the llvm-commits mailing list