[llvm] fbefc62 - [AArch64][SME] Sink tile offset operands into the loop for load/store instructions.

Sander de Smalen via llvm-commits llvm-commits at lists.llvm.org
Tue Jun 28 02:28:58 PDT 2022


Author: Sander de Smalen
Date: 2022-06-28T10:28:36+01:00
New Revision: fbefc62a964f93b137668c879af11ae0426d26f5

URL: https://github.com/llvm/llvm-project/commit/fbefc62a964f93b137668c879af11ae0426d26f5
DIFF: https://github.com/llvm/llvm-project/commit/fbefc62a964f93b137668c879af11ae0426d26f5.diff

LOG: [AArch64][SME] Sink tile offset operands into the loop for load/store instructions.

This helps ISel decompose the generic offset for the tile into a base + offset.
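For illustration, a minimal IR sketch of the effect (value names are
hypothetical, mirroring the tests added below). An 'add' computing a tile
slice index that lives outside the loop is loop-invariant and out of ISel's
reach; with this change, shouldSinkOperands tells CodeGenPrepare to duplicate
it into the loop body next to its user, so ISel can fold the constant into
the slice immediate:

  entry:
    %add1 = add i32 %base, 1        ; loop-invariant, not visible to ISel
    br label %for.body
  for.body:
    call void @llvm.aarch64.sme.ld1w.horiz(<vscale x 16 x i1> %pg, ptr %src, i64 0, i32 %add1)
    ...

  ; After sinking, the add is rematerialized in the loop and selects to
  ;   ld1w {za0h.s[w12, 1]}, p0/z, [x0]    (base w12 + slice immediate 1)
  for.body:
    %add1.sunk = add i32 %base, 1
    call void @llvm.aarch64.sme.ld1w.horiz(<vscale x 16 x i1> %pg, ptr %src, i64 0, i32 %add1.sunk)
    ...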

Reviewed By: dmgreen

Differential Revision: https://reviews.llvm.org/D128508

Added: 
    

Modified: 
    llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
    llvm/test/CodeGen/AArch64/sme-intrinsics-loads.ll
    llvm/test/CodeGen/AArch64/sme-intrinsics-mova-extract.ll
    llvm/test/CodeGen/AArch64/sme-intrinsics-mova-insert.ll
    llvm/test/CodeGen/AArch64/sme-intrinsics-stores.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index c601911e312c..81f48678e9a5 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -12645,9 +12645,6 @@ static bool isSplatShuffle(Value *V) {
 /// shufflevectors extracts and/or sext/zext can be folded into (u,s)subl(2).
 bool AArch64TargetLowering::shouldSinkOperands(
     Instruction *I, SmallVectorImpl<Use *> &Ops) const {
-  if (!I->getType()->isVectorTy())
-    return false;
-
   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
     switch (II->getIntrinsicID()) {
     case Intrinsic::aarch64_neon_smull:
@@ -12660,7 +12657,8 @@ bool AArch64TargetLowering::shouldSinkOperands(
       LLVM_FALLTHROUGH;
 
     case Intrinsic::fma:
-      if (cast<VectorType>(I->getType())->getElementType()->isHalfTy() &&
+      if (isa<VectorType>(I->getType()) &&
+          cast<VectorType>(I->getType())->getElementType()->isHalfTy() &&
           !Subtarget->hasFullFP16())
         return false;
       LLVM_FALLTHROUGH;
@@ -12673,7 +12671,46 @@ bool AArch64TargetLowering::shouldSinkOperands(
       if (isSplatShuffle(II->getOperand(1)))
         Ops.push_back(&II->getOperandUse(1));
       return !Ops.empty();
-
+    case Intrinsic::aarch64_sme_write_horiz:
+    case Intrinsic::aarch64_sme_write_vert:
+    case Intrinsic::aarch64_sme_writeq_horiz:
+    case Intrinsic::aarch64_sme_writeq_vert: {
+      auto *Idx = dyn_cast<Instruction>(II->getOperand(1));
+      if (!Idx || Idx->getOpcode() != Instruction::Add)
+        return false;
+      Ops.push_back(&II->getOperandUse(1));
+      return true;
+    }
+    case Intrinsic::aarch64_sme_read_horiz:
+    case Intrinsic::aarch64_sme_read_vert:
+    case Intrinsic::aarch64_sme_readq_horiz:
+    case Intrinsic::aarch64_sme_readq_vert:
+    case Intrinsic::aarch64_sme_ld1b_vert:
+    case Intrinsic::aarch64_sme_ld1h_vert:
+    case Intrinsic::aarch64_sme_ld1w_vert:
+    case Intrinsic::aarch64_sme_ld1d_vert:
+    case Intrinsic::aarch64_sme_ld1q_vert:
+    case Intrinsic::aarch64_sme_st1b_vert:
+    case Intrinsic::aarch64_sme_st1h_vert:
+    case Intrinsic::aarch64_sme_st1w_vert:
+    case Intrinsic::aarch64_sme_st1d_vert:
+    case Intrinsic::aarch64_sme_st1q_vert:
+    case Intrinsic::aarch64_sme_ld1b_horiz:
+    case Intrinsic::aarch64_sme_ld1h_horiz:
+    case Intrinsic::aarch64_sme_ld1w_horiz:
+    case Intrinsic::aarch64_sme_ld1d_horiz:
+    case Intrinsic::aarch64_sme_ld1q_horiz:
+    case Intrinsic::aarch64_sme_st1b_horiz:
+    case Intrinsic::aarch64_sme_st1h_horiz:
+    case Intrinsic::aarch64_sme_st1w_horiz:
+    case Intrinsic::aarch64_sme_st1d_horiz:
+    case Intrinsic::aarch64_sme_st1q_horiz: {
+      auto *Idx = dyn_cast<Instruction>(II->getOperand(3));
+      if (!Idx || Idx->getOpcode() != Instruction::Add)
+        return false;
+      Ops.push_back(&II->getOperandUse(3));
+      return true;
+    }
     case Intrinsic::aarch64_neon_pmull:
       if (!areExtractShuffleVectors(II->getOperand(0), II->getOperand(1)))
         return false;
@@ -12692,6 +12729,9 @@ bool AArch64TargetLowering::shouldSinkOperands(
     }
   }
 
+  if (!I->getType()->isVectorTy())
+    return false;
+
   switch (I->getOpcode()) {
   case Instruction::Sub:
   case Instruction::Add: {

diff --git a/llvm/test/CodeGen/AArch64/sme-intrinsics-loads.ll b/llvm/test/CodeGen/AArch64/sme-intrinsics-loads.ll
index 167e501ff3c1..673410cfd4b8 100644
--- a/llvm/test/CodeGen/AArch64/sme-intrinsics-loads.ll
+++ b/llvm/test/CodeGen/AArch64/sme-intrinsics-loads.ll
@@ -295,6 +295,40 @@ define void @ldr_with_off_16mulvl(ptr %ptr) {
   ret void;
 }
 
+; Ensure that the tile offset is sunk, given that this is likely to be an 'add'
+; that's decomposed into a base + offset in ISel.
+define void @test_ld1_sink_tile0_offset_operand(<vscale x 16 x i1> %pg, ptr %src, i32 %base, i32 %N) {
+; CHECK-LABEL: test_ld1_sink_tile0_offset_operand:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    mov w12, w1
+; CHECK-NEXT:  .LBB14_1: // %for.body
+; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    ld1w {za0h.s[w12, 0]}, p0/z, [x0]
+; CHECK-NEXT:    subs w2, w2, #1
+; CHECK-NEXT:    ld1w {za0h.s[w12, 1]}, p0/z, [x0]
+; CHECK-NEXT:    ld1w {za0h.s[w12, 2]}, p0/z, [x0]
+; CHECK-NEXT:    b.ne .LBB14_1
+; CHECK-NEXT:  // %bb.2: // %exit
+; CHECK-NEXT:    ret
+entry:
+  %add1 = add i32 %base, 1
+  %add2 = add i32 %base, 2
+  br label %for.body
+
+for.body:
+  %i = phi i32 [ 0, %entry ], [ %inc, %for.body ]
+  call void @llvm.aarch64.sme.ld1w.horiz(<vscale x 16 x i1> %pg, ptr %src, i64 0, i32 %base)
+  call void @llvm.aarch64.sme.ld1w.horiz(<vscale x 16 x i1> %pg, ptr %src, i64 0, i32 %add1)
+  call void @llvm.aarch64.sme.ld1w.horiz(<vscale x 16 x i1> %pg, ptr %src, i64 0, i32 %add2)
+  %inc = add nuw nsw i32 %i, 1
+  %exitcond.not = icmp eq i32 %inc, %N
+  br i1 %exitcond.not, label %exit, label %for.body
+
+exit:
+  ret void
+}
+
+
 declare void @llvm.aarch64.sme.ld1b.horiz(<vscale x 16 x i1>, ptr, i64, i32)
 declare void @llvm.aarch64.sme.ld1h.horiz(<vscale x 16 x i1>, ptr, i64, i32)
 declare void @llvm.aarch64.sme.ld1w.horiz(<vscale x 16 x i1>, ptr, i64, i32)

diff --git a/llvm/test/CodeGen/AArch64/sme-intrinsics-mova-extract.ll b/llvm/test/CodeGen/AArch64/sme-intrinsics-mova-extract.ll
index 3ea1ca10bcc9..bbefd88845ee 100644
--- a/llvm/test/CodeGen/AArch64/sme-intrinsics-mova-extract.ll
+++ b/llvm/test/CodeGen/AArch64/sme-intrinsics-mova-extract.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve,+sme -verify-machineinstrs < %s | FileCheck %s
 
 define <vscale x 16 x i8> @extract_row_b(<vscale x 16 x i8> %zd, <vscale x 16 x i1> %pg, i32 %tileslice) {
 ; CHECK-LABEL: extract_row_b:
@@ -435,6 +435,45 @@ define <vscale x 2 x double> @extract_col_q_v2f64(<vscale x 2 x double> %zd, <vs
   ret <vscale x 2 x double> %res
 }
 
+define <vscale x 4 x i32> @test_sink_offset_operand(<vscale x 4 x i1> %pg, i32 %base, i32 %N) {
+; CHECK-LABEL: test_sink_offset_operand:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    mov w12, w0
+; CHECK-NEXT:    mov z0.s, #0 // =0x0
+; CHECK-NEXT:  .LBB26_1: // %for.body
+; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    mov z1.d, z0.d
+; CHECK-NEXT:    mov z2.d, z0.d
+; CHECK-NEXT:    mov z1.s, p0/m, za0h.s[w12, 0]
+; CHECK-NEXT:    mov z3.d, z0.d
+; CHECK-NEXT:    mov z2.s, p0/m, za0h.s[w12, 1]
+; CHECK-NEXT:    subs w1, w1, #3
+; CHECK-NEXT:    mov z3.s, p0/m, za0h.s[w12, 2]
+; CHECK-NEXT:    b.ne .LBB26_1
+; CHECK-NEXT:  // %bb.2: // %exit
+; CHECK-NEXT:    add z0.s, z1.s, z2.s
+; CHECK-NEXT:    add z0.s, z0.s, z3.s
+; CHECK-NEXT:    ret
+entry:
+  %add1 = add i32 %base, 1
+  %add2 = add i32 %base, 2
+  br label %for.body
+
+for.body:
+  %i = phi i32 [ 0, %entry ], [ %inc, %for.body ]
+  %z0 = call <vscale x 4 x i32> @llvm.aarch64.sme.read.horiz.nxv4i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> %pg, i64 0, i32 %base)
+  %z1 = call <vscale x 4 x i32> @llvm.aarch64.sme.read.horiz.nxv4i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> %pg, i64 0, i32 %add1)
+  %z2 = call <vscale x 4 x i32> @llvm.aarch64.sme.read.horiz.nxv4i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> %pg, i64 0, i32 %add2)
+  %inc = add nuw nsw i32 %i, 3
+  %exitcond.not = icmp eq i32 %inc, %N
+  br i1 %exitcond.not, label %exit, label %for.body
+
+exit:
+  %tmp1 = add <vscale x 4 x i32> %z0, %z1
+  %res = add <vscale x 4 x i32> %tmp1, %z2
+  ret <vscale x 4 x i32> %res
+}
+
 declare <vscale x 16 x i8> @llvm.aarch64.sme.read.horiz.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, i64, i32)
 declare <vscale x 8 x i16> @llvm.aarch64.sme.read.horiz.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, i64, i32)
 declare <vscale x 8 x half> @llvm.aarch64.sme.read.horiz.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i1>, i64, i32)

diff --git a/llvm/test/CodeGen/AArch64/sme-intrinsics-mova-insert.ll b/llvm/test/CodeGen/AArch64/sme-intrinsics-mova-insert.ll
index 5d7427ff8a71..3a8551215f38 100644
--- a/llvm/test/CodeGen/AArch64/sme-intrinsics-mova-insert.ll
+++ b/llvm/test/CodeGen/AArch64/sme-intrinsics-mova-insert.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve,+sme -verify-machineinstrs < %s | FileCheck %s
 
 define void @insert_row_b(i32 %tileslice, <vscale x 16 x i1> %pg,
 ; CHECK-LABEL: insert_row_b:
@@ -438,6 +438,37 @@ define void @insert_col_q_v2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %zn
   ret void
 }
 
+define void @test_sink_offset_operand(<vscale x 4 x i1> %pg, i32 %base, i32 %N) {
+; CHECK-LABEL: test_sink_offset_operand:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    mov w12, w0
+; CHECK-NEXT:    mov z0.s, #0 // =0x0
+; CHECK-NEXT:  .LBB28_1: // %for.body
+; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    subs w1, w1, #3
+; CHECK-NEXT:    mov za0h.s[w12, 0], p0/m, z0.s
+; CHECK-NEXT:    mov za0h.s[w12, 1], p0/m, z0.s
+; CHECK-NEXT:    mov za0h.s[w12, 2], p0/m, z0.s
+; CHECK-NEXT:    b.ne .LBB28_1
+; CHECK-NEXT:  // %bb.2: // %exit
+; CHECK-NEXT:    ret
+entry:
+  %add1 = add i32 %base, 1
+  %add2 = add i32 %base, 2
+  br label %for.body
+
+for.body:
+  %i = phi i32 [ 0, %entry ], [ %inc, %for.body ]
+  call void @llvm.aarch64.sme.write.horiz.nxv4i32(i64 0, i32 %base, <vscale x 4 x i1> %pg, <vscale x 4 x i32> zeroinitializer)
+  call void @llvm.aarch64.sme.write.horiz.nxv4i32(i64 0, i32 %add1, <vscale x 4 x i1> %pg, <vscale x 4 x i32> zeroinitializer)
+  call void @llvm.aarch64.sme.write.horiz.nxv4i32(i64 0, i32 %add2, <vscale x 4 x i1> %pg, <vscale x 4 x i32> zeroinitializer)
+  %inc = add nuw nsw i32 %i, 3
+  %exitcond.not = icmp eq i32 %inc, %N
+  br i1 %exitcond.not, label %exit, label %for.body
+
+exit:
+  ret void
+}
 
 declare void @llvm.aarch64.sme.write.horiz.nxv16i8(i64, i32, <vscale x 16 x i1>, <vscale x 16 x i8>)
 declare void @llvm.aarch64.sme.write.horiz.nxv8i16(i64, i32, <vscale x 8 x i1>, <vscale x 8 x i16>)

diff --git a/llvm/test/CodeGen/AArch64/sme-intrinsics-stores.ll b/llvm/test/CodeGen/AArch64/sme-intrinsics-stores.ll
index 00d078305d06..82e6c43d28ae 100644
--- a/llvm/test/CodeGen/AArch64/sme-intrinsics-stores.ll
+++ b/llvm/test/CodeGen/AArch64/sme-intrinsics-stores.ll
@@ -295,6 +295,39 @@ define void @str_with_off_16mulvl(ptr %ptr) {
   ret void;
 }
 
+; Ensure that the tile offset is sunk, given that this is likely to be an 'add'
+; that's decomposed into a base + offset in ISel.
+define void @test_sink_tile0_offset_operand(<vscale x 16 x i1> %pg, ptr %src, i32 %base, i32 %N) {
+; CHECK-LABEL: test_sink_tile0_offset_operand:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    mov w12, w1
+; CHECK-NEXT:  .LBB14_1: // %for.body
+; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    st1w {za0h.s[w12, 0]}, p0, [x0]
+; CHECK-NEXT:    subs w2, w2, #1
+; CHECK-NEXT:    st1w {za0h.s[w12, 1]}, p0, [x0]
+; CHECK-NEXT:    st1w {za0h.s[w12, 2]}, p0, [x0]
+; CHECK-NEXT:    b.ne .LBB14_1
+; CHECK-NEXT:  // %bb.2: // %exit
+; CHECK-NEXT:    ret
+entry:
+  %add0 = add i32 %base, 1
+  %add1 = add i32 %base, 2
+  br label %for.body
+
+for.body:
+  %i = phi i32 [ 0, %entry ], [ %inc, %for.body ]
+  tail call void @llvm.aarch64.sme.st1w.horiz(<vscale x 16 x i1> %pg, ptr %src, i64 0, i32 %base)
+  tail call void @llvm.aarch64.sme.st1w.horiz(<vscale x 16 x i1> %pg, ptr %src, i64 0, i32 %add0)
+  tail call void @llvm.aarch64.sme.st1w.horiz(<vscale x 16 x i1> %pg, ptr %src, i64 0, i32 %add1)
+  %inc = add nuw nsw i32 %i, 1
+  %exitcond.not = icmp eq i32 %inc, %N
+  br i1 %exitcond.not, label %exit, label %for.body
+
+exit:
+  ret void
+}
+
 declare void @llvm.aarch64.sme.st1b.horiz(<vscale x 16 x i1>, ptr, i64, i32)
 declare void @llvm.aarch64.sme.st1h.horiz(<vscale x 16 x i1>, ptr, i64, i32)
 declare void @llvm.aarch64.sme.st1w.horiz(<vscale x 16 x i1>, ptr, i64, i32)
