[llvm] 1399249 - [AArch64][CodeGen] Emit alignment "Max Skip" operand for AArch64 loops

Nicholas Guy via llvm-commits llvm-commits at lists.llvm.org
Wed Jan 5 04:54:37 PST 2022


Author: Nicholas Guy
Date: 2022-01-05T12:54:31Z
New Revision: 13992498cd96dcef34cdf1ff02730d266feb0730

URL: https://github.com/llvm/llvm-project/commit/13992498cd96dcef34cdf1ff02730d266feb0730
DIFF: https://github.com/llvm/llvm-project/commit/13992498cd96dcef34cdf1ff02730d266feb0730.diff

LOG: [AArch64][CodeGen] Emit alignment "Max Skip" operand for AArch64 loops

Differential Revision: https://reviews.llvm.org/D114879
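
This change makes the AArch64 backend pass a "max bytes" value (GAS calls
it "max skip") alongside the preferred loop alignment, so loop headers on
the tuned Neoverse cores are emitted as a three-operand .p2align
directive. The third operand bounds the padding: if reaching the requested
boundary would insert more than that many bytes, no padding is emitted at
all. A minimal C++ sketch of that rule (illustrative only; the names are
hypothetical and this is not LLVM's implementation):

  #include <cstdint>

  // Padding .p2align would insert at `offset` to reach a 2^logAlign
  // boundary; the alignment is skipped entirely when that exceeds maxSkip.
  uint64_t paddingFor(uint64_t offset, unsigned logAlign, uint64_t maxSkip) {
    uint64_t align = 1ULL << logAlign;   // e.g. 1 << 5 == 32 bytes
    uint64_t pad = (align - offset % align) % align;
    return pad <= maxSkip ? pad : 0;     // too far away: leave unaligned
  }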

Added: 
    llvm/test/CodeGen/AArch64/aarch64-p2align-max-bytes-neoverse.ll

Modified: 
    llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
    llvm/lib/Target/AArch64/AArch64Subtarget.cpp
    llvm/lib/Target/AArch64/AArch64Subtarget.h

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 1dfc51b1358d..01e581e81ab8 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -962,6 +962,7 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
   setMinFunctionAlignment(Align(4));
   // Set preferred alignments.
   setPrefLoopAlignment(Align(1ULL << STI.getPrefLoopLogAlignment()));
+  setMaxBytesForAlignment(STI.getMaxBytesForLoopAlignment());
   setPrefFunctionAlignment(Align(1ULL << STI.getPrefFunctionLogAlignment()));
 
   // Only change the limit for entries in a jump table if specified by
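
Note the asymmetry in the two setter calls above: the preferred loop
alignment is stored on the subtarget as a log2 value and expanded to a
byte alignment here (1ULL << log), while the max-bytes value is already a
plain byte count. A standalone sanity check of that arithmetic (not LLVM
code; the constants match the Neoverse tuning added below):

  #include <cassert>
  #include <cstdint>

  int main() {
    unsigned prefLoopLogAlignment = 5;       // as on Neoverse N1/N2/V1
    unsigned maxBytesForLoopAlignment = 16;
    uint64_t loopAlign = 1ULL << prefLoopLogAlignment;
    assert(loopAlign == 32);                 // 32-byte loop boundaries
    // The cap can only ever suppress padding if it is below the largest
    // possible pad (align - 1); 16 < 31 holds here.
    assert(maxBytesForLoopAlignment < loopAlign - 1);
    return 0;
  }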

diff --git a/llvm/lib/Target/AArch64/AArch64Subtarget.cpp b/llvm/lib/Target/AArch64/AArch64Subtarget.cpp
index f7d3dd0bc222..a5ae7c3a24a8 100644
--- a/llvm/lib/Target/AArch64/AArch64Subtarget.cpp
+++ b/llvm/lib/Target/AArch64/AArch64Subtarget.cpp
@@ -157,13 +157,19 @@ void AArch64Subtarget::initializeProperties() {
     break;
   case NeoverseN1:
     PrefFunctionLogAlignment = 4;
+    PrefLoopLogAlignment = 5;
+    MaxBytesForLoopAlignment = 16;
     break;
   case NeoverseN2:
     PrefFunctionLogAlignment = 4;
+    PrefLoopLogAlignment = 5;
+    MaxBytesForLoopAlignment = 16;
     VScaleForTuning = 1;
     break;
   case NeoverseV1:
     PrefFunctionLogAlignment = 4;
+    PrefLoopLogAlignment = 5;
+    MaxBytesForLoopAlignment = 16;
     VScaleForTuning = 2;
     break;
   case Neoverse512TVB:
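
With this tuning, Neoverse N1, N2, and V1 prefer 2^5 = 32-byte loop
alignment but will pad a loop header by at most 16 bytes. A worked example
of which offsets get aligned under those numbers (a standalone sketch, not
LLVM code):

  #include <cstdint>
  #include <cstdio>
  #include <initializer_list>

  int main() {
    const uint64_t align = 1ULL << 5; // PrefLoopLogAlignment = 5 -> 32 bytes
    const uint64_t maxSkip = 16;      // MaxBytesForLoopAlignment
    // 20 bytes past a 32-byte boundary needs 12 bytes of padding (<= 16),
    // so the loop gets aligned; 8 bytes past would need 24 (> 16), so it
    // is left where it is.
    for (uint64_t offset : {20, 8}) {
      uint64_t pad = (align - offset % align) % align;
      std::printf("offset %% 32 == %llu: pad %llu -> %s\n",
                  (unsigned long long)offset, (unsigned long long)pad,
                  pad <= maxSkip ? "align" : "skip alignment");
    }
    return 0;
  }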

diff --git a/llvm/lib/Target/AArch64/AArch64Subtarget.h b/llvm/lib/Target/AArch64/AArch64Subtarget.h
index bcd3d873985f..adb10a43769d 100644
--- a/llvm/lib/Target/AArch64/AArch64Subtarget.h
+++ b/llvm/lib/Target/AArch64/AArch64Subtarget.h
@@ -279,6 +279,7 @@ class AArch64Subtarget final : public AArch64GenSubtargetInfo {
   unsigned MaxPrefetchIterationsAhead = UINT_MAX;
   unsigned PrefFunctionLogAlignment = 0;
   unsigned PrefLoopLogAlignment = 0;
+  unsigned MaxBytesForLoopAlignment = 0;
   unsigned MaxJumpTableSize = 0;
   unsigned WideningBaseCost = 0;
 
@@ -470,6 +471,10 @@ class AArch64Subtarget final : public AArch64GenSubtargetInfo {
   }
   unsigned getPrefLoopLogAlignment() const { return PrefLoopLogAlignment; }
 
+  unsigned getMaxBytesForLoopAlignment() const {
+    return MaxBytesForLoopAlignment;
+  }
+
   unsigned getMaximumJumpTableSize() const { return MaxJumpTableSize; }
 
   unsigned getWideningBaseCost() const { return WideningBaseCost; }

diff --git a/llvm/test/CodeGen/AArch64/aarch64-p2align-max-bytes-neoverse.ll b/llvm/test/CodeGen/AArch64/aarch64-p2align-max-bytes-neoverse.ll
new file mode 100644
index 000000000000..ab3d8494459a
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/aarch64-p2align-max-bytes-neoverse.ll
@@ -0,0 +1,77 @@
+; RUN: llc -mtriple=aarch64-none-linux-gnu -align-loops=32 < %s -o -| FileCheck %s --check-prefixes=CHECK,CHECK-DEFAULT
+; RUN: llc -mtriple=aarch64-none-linux-gnu -mcpu=neoverse-n1 < %s -o -| FileCheck %s --check-prefixes=CHECK,CHECK-N1
+
+define i32 @a(i32 %x, i32* nocapture readonly %y, i32* nocapture readonly %z) {
+; CHECK-DEFAULT:    .p2align 5
+; CHECK-N1:         .p2align 5, 0x0, 16
+; CHECK-NEXT:       .LBB0_5: // %vector.body
+; CHECK-DEFAULT:    .p2align 5
+; CHECK-N1:         .p2align 5, 0x0, 16
+; CHECK-NEXT:       .LBB0_8: // %for.body
+entry:
+  %cmp10 = icmp sgt i32 %x, 0
+  br i1 %cmp10, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:                               ; preds = %entry
+  %wide.trip.count = zext i32 %x to i64
+  %min.iters.check = icmp ult i32 %x, 8
+  br i1 %min.iters.check, label %for.body.preheader17, label %vector.ph
+
+vector.ph:                                        ; preds = %for.body.preheader
+  %n.vec = and i64 %wide.trip.count, 4294967288
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %vec.phi = phi <4 x i32> [ zeroinitializer, %vector.ph ], [ %10, %vector.body ]
+  %vec.phi13 = phi <4 x i32> [ zeroinitializer, %vector.ph ], [ %11, %vector.body ]
+  %0 = getelementptr inbounds i32, i32* %y, i64 %index
+  %1 = bitcast i32* %0 to <4 x i32>*
+  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
+  %2 = getelementptr inbounds i32, i32* %0, i64 4
+  %3 = bitcast i32* %2 to <4 x i32>*
+  %wide.load14 = load <4 x i32>, <4 x i32>* %3, align 4
+  %4 = getelementptr inbounds i32, i32* %z, i64 %index
+  %5 = bitcast i32* %4 to <4 x i32>*
+  %wide.load15 = load <4 x i32>, <4 x i32>* %5, align 4
+  %6 = getelementptr inbounds i32, i32* %4, i64 4
+  %7 = bitcast i32* %6 to <4 x i32>*
+  %wide.load16 = load <4 x i32>, <4 x i32>* %7, align 4
+  %8 = add <4 x i32> %wide.load, %vec.phi
+  %9 = add <4 x i32> %wide.load14, %vec.phi13
+  %10 = add <4 x i32> %8, %wide.load15
+  %11 = add <4 x i32> %9, %wide.load16
+  %index.next = add nuw i64 %index, 8
+  %12 = icmp eq i64 %index.next, %n.vec
+  br i1 %12, label %middle.block, label %vector.body
+
+middle.block:                                     ; preds = %vector.body
+  %bin.rdx = add <4 x i32> %11, %10
+  %13 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %bin.rdx)
+  %cmp.n = icmp eq i64 %n.vec, %wide.trip.count
+  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader17
+
+for.body.preheader17:                             ; preds = %for.body.preheader, %middle.block
+  %indvars.iv.ph = phi i64 [ 0, %for.body.preheader ], [ %n.vec, %middle.block ]
+  %b.011.ph = phi i32 [ 0, %for.body.preheader ], [ %13, %middle.block ]
+  br label %for.body
+
+for.cond.cleanup:                                 ; preds = %for.body, %middle.block, %entry
+  %b.0.lcssa = phi i32 [ 0, %entry ], [ %13, %middle.block ], [ %add3, %for.body ]
+  ret i32 %b.0.lcssa
+
+for.body:                                         ; preds = %for.body.preheader17, %for.body
+  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader17 ]
+  %b.011 = phi i32 [ %add3, %for.body ], [ %b.011.ph, %for.body.preheader17 ]
+  %arrayidx = getelementptr inbounds i32, i32* %y, i64 %indvars.iv
+  %14 = load i32, i32* %arrayidx, align 4
+  %arrayidx2 = getelementptr inbounds i32, i32* %z, i64 %indvars.iv
+  %15 = load i32, i32* %arrayidx2, align 4
+  %add = add i32 %14, %b.011
+  %add3 = add i32 %add, %15
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %exitcond.not = icmp eq i64 %indvars.iv.next, %wide.trip.count
+  br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
+}
+
+declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)
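
The test checks both loop headers in the function: the generic run with
-align-loops=32 expects a bare ".p2align 5" (pad unconditionally to a
32-byte boundary), while the -mcpu=neoverse-n1 run expects
".p2align 5, 0x0, 16" (zero fill, and padding only when at most 16 bytes
are needed), before both the vectorized inner loop (%vector.body) and the
scalar remainder loop (%for.body).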