[llvm] [CodeGen] Avoid sinking vector comparisons during CodeGenPrepare (PR #113158)
David Sherwood via llvm-commits
llvm-commits at lists.llvm.org
Thu Oct 24 03:22:49 PDT 2024
https://github.com/david-arm updated https://github.com/llvm/llvm-project/pull/113158
>From 6b93bd037b5c1caec37c8c6c46d82fc13bc63db9 Mon Sep 17 00:00:00 2001
From: David Sherwood <david.sherwood at arm.com>
Date: Thu, 24 Oct 2024 10:20:59 +0000
Subject: [PATCH 1/2] Add tests
---
.../CodeGen/AArch64/no-sink-vector-cmp.ll | 127 ++++++++++++++++
.../CodeGen/Thumb2/mve-sink-vector-cmp.ll | 136 ++++++++++++++++++
2 files changed, 263 insertions(+)
create mode 100644 llvm/test/CodeGen/AArch64/no-sink-vector-cmp.ll
create mode 100644 llvm/test/CodeGen/Thumb2/mve-sink-vector-cmp.ll
diff --git a/llvm/test/CodeGen/AArch64/no-sink-vector-cmp.ll b/llvm/test/CodeGen/AArch64/no-sink-vector-cmp.ll
new file mode 100644
index 00000000000000..c7e80b1c3dbb6f
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/no-sink-vector-cmp.ll
@@ -0,0 +1,127 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s | FileCheck %s
+
+target triple = "aarch64-unknown-linux-gnu"
+
+define void @vector_loop_with_icmp(ptr nocapture noundef writeonly %dest) {
+; CHECK-LABEL: vector_loop_with_icmp:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #15 // =0xf
+; CHECK-NEXT: mov w10, #4 // =0x4
+; CHECK-NEXT: adrp x9, .LCPI0_0
+; CHECK-NEXT: adrp x11, .LCPI0_1
+; CHECK-NEXT: dup v0.2d, x8
+; CHECK-NEXT: dup v1.2d, x10
+; CHECK-NEXT: ldr q2, [x9, :lo12:.LCPI0_0]
+; CHECK-NEXT: ldr q3, [x11, :lo12:.LCPI0_1]
+; CHECK-NEXT: add x9, x0, #8
+; CHECK-NEXT: mov w10, #16 // =0x10
+; CHECK-NEXT: mov w11, #1 // =0x1
+; CHECK-NEXT: b .LBB0_2
+; CHECK-NEXT: .LBB0_1: // %pred.store.continue18
+; CHECK-NEXT: // in Loop: Header=BB0_2 Depth=1
+; CHECK-NEXT: add v2.2d, v2.2d, v1.2d
+; CHECK-NEXT: add v3.2d, v3.2d, v1.2d
+; CHECK-NEXT: subs x10, x10, #4
+; CHECK-NEXT: add x9, x9, #16
+; CHECK-NEXT: b.eq .LBB0_10
+; CHECK-NEXT: .LBB0_2: // %vector.body
+; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: cmhi v4.2d, v0.2d, v3.2d
+; CHECK-NEXT: xtn v4.2s, v4.2d
+; CHECK-NEXT: uzp1 v4.4h, v4.4h, v0.4h
+; CHECK-NEXT: umov w12, v4.h[0]
+; CHECK-NEXT: tbz w12, #0, .LBB0_4
+; CHECK-NEXT: // %bb.3: // %pred.store.if
+; CHECK-NEXT: // in Loop: Header=BB0_2 Depth=1
+; CHECK-NEXT: stur w11, [x9, #-8]
+; CHECK-NEXT: .LBB0_4: // %pred.store.continue
+; CHECK-NEXT: // in Loop: Header=BB0_2 Depth=1
+; CHECK-NEXT: dup v4.2d, x8
+; CHECK-NEXT: cmhi v4.2d, v4.2d, v3.2d
+; CHECK-NEXT: xtn v4.2s, v4.2d
+; CHECK-NEXT: uzp1 v4.4h, v4.4h, v0.4h
+; CHECK-NEXT: umov w12, v4.h[1]
+; CHECK-NEXT: tbz w12, #0, .LBB0_6
+; CHECK-NEXT: // %bb.5: // %pred.store.if5
+; CHECK-NEXT: // in Loop: Header=BB0_2 Depth=1
+; CHECK-NEXT: stur w11, [x9, #-4]
+; CHECK-NEXT: .LBB0_6: // %pred.store.continue6
+; CHECK-NEXT: // in Loop: Header=BB0_2 Depth=1
+; CHECK-NEXT: dup v4.2d, x8
+; CHECK-NEXT: cmhi v4.2d, v4.2d, v2.2d
+; CHECK-NEXT: xtn v4.2s, v4.2d
+; CHECK-NEXT: uzp1 v4.4h, v0.4h, v4.4h
+; CHECK-NEXT: umov w12, v4.h[2]
+; CHECK-NEXT: tbz w12, #0, .LBB0_8
+; CHECK-NEXT: // %bb.7: // %pred.store.if7
+; CHECK-NEXT: // in Loop: Header=BB0_2 Depth=1
+; CHECK-NEXT: str w11, [x9]
+; CHECK-NEXT: .LBB0_8: // %pred.store.continue8
+; CHECK-NEXT: // in Loop: Header=BB0_2 Depth=1
+; CHECK-NEXT: dup v4.2d, x8
+; CHECK-NEXT: cmhi v4.2d, v4.2d, v2.2d
+; CHECK-NEXT: xtn v4.2s, v4.2d
+; CHECK-NEXT: uzp1 v4.4h, v0.4h, v4.4h
+; CHECK-NEXT: umov w12, v4.h[3]
+; CHECK-NEXT: tbz w12, #0, .LBB0_1
+; CHECK-NEXT: // %bb.9: // %pred.store.if9
+; CHECK-NEXT: // in Loop: Header=BB0_2 Depth=1
+; CHECK-NEXT: str w11, [x9, #4]
+; CHECK-NEXT: b .LBB0_1
+; CHECK-NEXT: .LBB0_10: // %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ br label %vector.body
+
+vector.body:
+ %index = phi i64 [ 0, %entry ], [ %index.next, %pred.store.continue18 ]
+ %vec.ind = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %entry ], [ %vec.ind.next, %pred.store.continue18 ]
+ %0 = icmp ult <4 x i64> %vec.ind, <i64 15, i64 15, i64 15, i64 15>
+ %1 = extractelement <4 x i1> %0, i64 0
+ br i1 %1, label %pred.store.if, label %pred.store.continue
+
+pred.store.if:
+ %2 = getelementptr inbounds i32, ptr %dest, i64 %index
+ store i32 1, ptr %2, align 4
+ br label %pred.store.continue
+
+pred.store.continue:
+ %3 = extractelement <4 x i1> %0, i64 1
+ br i1 %3, label %pred.store.if5, label %pred.store.continue6
+
+pred.store.if5:
+ %4 = or disjoint i64 %index, 1
+ %5 = getelementptr inbounds i32, ptr %dest, i64 %4
+ store i32 1, ptr %5, align 4
+ br label %pred.store.continue6
+
+pred.store.continue6:
+ %6 = extractelement <4 x i1> %0, i64 2
+ br i1 %6, label %pred.store.if7, label %pred.store.continue8
+
+pred.store.if7:
+ %7 = or disjoint i64 %index, 2
+ %8 = getelementptr inbounds i32, ptr %dest, i64 %7
+ store i32 1, ptr %8, align 4
+ br label %pred.store.continue8
+
+pred.store.continue8:
+ %9 = extractelement <4 x i1> %0, i64 3
+ br i1 %9, label %pred.store.if9, label %pred.store.continue18
+
+pred.store.if9:
+ %10 = or disjoint i64 %index, 3
+ %11 = getelementptr inbounds i32, ptr %dest, i64 %10
+ store i32 1, ptr %11, align 4
+ br label %pred.store.continue18
+
+pred.store.continue18:
+ %index.next = add i64 %index, 4
+ %vec.ind.next = add <4 x i64> %vec.ind, <i64 4, i64 4, i64 4, i64 4>
+ %24 = icmp eq i64 %index.next, 16
+ br i1 %24, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup:
+ ret void
+}
diff --git a/llvm/test/CodeGen/Thumb2/mve-sink-vector-cmp.ll b/llvm/test/CodeGen/Thumb2/mve-sink-vector-cmp.ll
new file mode 100644
index 00000000000000..3a7460bdd24ed4
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/mve-sink-vector-cmp.ll
@@ -0,0 +1,136 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve.fp,+fp-armv8d16sp,+fp16,+fullfp16 < %s | FileCheck %s
+
+define arm_aapcs_vfpcc void @vector_loop_with_icmp(ptr nocapture noundef writeonly %dest) {
+; CHECK-LABEL: vector_loop_with_icmp:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, lr}
+; CHECK-NEXT: adr r1, .LCPI0_0
+; CHECK-NEXT: adr r2, .LCPI0_1
+; CHECK-NEXT: vldrw.u32 q0, [r1]
+; CHECK-NEXT: vldrw.u32 q1, [r2]
+; CHECK-NEXT: movs r1, #0
+; CHECK-NEXT: mov.w r12, #1
+; CHECK-NEXT: mov.w lr, #0
+; CHECK-NEXT: .LBB0_1: @ %vector.body
+; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vmov r2, r3, d0
+; CHECK-NEXT: vmov r4, r5, d3
+; CHECK-NEXT: vmov r6, r7, d1
+; CHECK-NEXT: subs r2, #15
+; CHECK-NEXT: sbcs r2, r3, #0
+; CHECK-NEXT: cset r2, lo
+; CHECK-NEXT: cmp r2, #0
+; CHECK-NEXT: vmov r2, r3, d1
+; CHECK-NEXT: it ne
+; CHECK-NEXT: strne.w r12, [r0, r1, lsl #2]
+; CHECK-NEXT: subs r2, #15
+; CHECK-NEXT: sbcs r2, r3, #0
+; CHECK-NEXT: cset r2, lo
+; CHECK-NEXT: cmp r2, #0
+; CHECK-NEXT: itt ne
+; CHECK-NEXT: orrne r2, r1, #1
+; CHECK-NEXT: strne.w r12, [r0, r2, lsl #2]
+; CHECK-NEXT: vmov r2, r3, d2
+; CHECK-NEXT: subs r2, #15
+; CHECK-NEXT: sbcs r2, r3, #0
+; CHECK-NEXT: cset r2, lo
+; CHECK-NEXT: cmp r2, #0
+; CHECK-NEXT: itt ne
+; CHECK-NEXT: orrne r2, r1, #2
+; CHECK-NEXT: strne.w r12, [r0, r2, lsl #2]
+; CHECK-NEXT: vmov r2, r3, d3
+; CHECK-NEXT: subs r2, #15
+; CHECK-NEXT: sbcs r2, r3, #0
+; CHECK-NEXT: cset r2, lo
+; CHECK-NEXT: cmp r2, #0
+; CHECK-NEXT: itt ne
+; CHECK-NEXT: orrne r2, r1, #3
+; CHECK-NEXT: strne.w r12, [r0, r2, lsl #2]
+; CHECK-NEXT: vmov r2, r3, d2
+; CHECK-NEXT: adds r1, #4
+; CHECK-NEXT: adc lr, lr, #0
+; CHECK-NEXT: adds.w r9, r2, #4
+; CHECK-NEXT: adc r8, r3, #0
+; CHECK-NEXT: vmov r3, r2, d0
+; CHECK-NEXT: adds r4, #4
+; CHECK-NEXT: adc r5, r5, #0
+; CHECK-NEXT: adds r6, #4
+; CHECK-NEXT: adc r7, r7, #0
+; CHECK-NEXT: vmov q1[2], q1[0], r9, r4
+; CHECK-NEXT: vmov q1[3], q1[1], r8, r5
+; CHECK-NEXT: adds r3, #4
+; CHECK-NEXT: vmov q0[2], q0[0], r3, r6
+; CHECK-NEXT: adc r2, r2, #0
+; CHECK-NEXT: vmov q0[3], q0[1], r2, r7
+; CHECK-NEXT: eor r2, r1, #16
+; CHECK-NEXT: orrs.w r2, r2, lr
+; CHECK-NEXT: bne .LBB0_1
+; CHECK-NEXT: @ %bb.2: @ %for.cond.cleanup
+; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, pc}
+; CHECK-NEXT: .p2align 4
+; CHECK-NEXT: @ %bb.3:
+; CHECK-NEXT: .LCPI0_0:
+; CHECK-NEXT: .long 0 @ 0x0
+; CHECK-NEXT: .long 0 @ 0x0
+; CHECK-NEXT: .long 1 @ 0x1
+; CHECK-NEXT: .long 0 @ 0x0
+; CHECK-NEXT: .LCPI0_1:
+; CHECK-NEXT: .long 2 @ 0x2
+; CHECK-NEXT: .long 0 @ 0x0
+; CHECK-NEXT: .long 3 @ 0x3
+; CHECK-NEXT: .long 0 @ 0x0
+entry:
+ br label %vector.body
+
+vector.body:
+ %index = phi i64 [ 0, %entry ], [ %index.next, %pred.store.continue18 ]
+ %vec.ind = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %entry ], [ %vec.ind.next, %pred.store.continue18 ]
+ %0 = icmp ult <4 x i64> %vec.ind, <i64 15, i64 15, i64 15, i64 15>
+ %1 = extractelement <4 x i1> %0, i64 0
+ br i1 %1, label %pred.store.if, label %pred.store.continue
+
+pred.store.if:
+ %2 = getelementptr inbounds i32, ptr %dest, i64 %index
+ store i32 1, ptr %2, align 4
+ br label %pred.store.continue
+
+pred.store.continue:
+ %3 = extractelement <4 x i1> %0, i64 1
+ br i1 %3, label %pred.store.if5, label %pred.store.continue6
+
+pred.store.if5:
+ %4 = or disjoint i64 %index, 1
+ %5 = getelementptr inbounds i32, ptr %dest, i64 %4
+ store i32 1, ptr %5, align 4
+ br label %pred.store.continue6
+
+pred.store.continue6:
+ %6 = extractelement <4 x i1> %0, i64 2
+ br i1 %6, label %pred.store.if7, label %pred.store.continue8
+
+pred.store.if7:
+ %7 = or disjoint i64 %index, 2
+ %8 = getelementptr inbounds i32, ptr %dest, i64 %7
+ store i32 1, ptr %8, align 4
+ br label %pred.store.continue8
+
+pred.store.continue8:
+ %9 = extractelement <4 x i1> %0, i64 3
+ br i1 %9, label %pred.store.if9, label %pred.store.continue18
+
+pred.store.if9:
+ %10 = or disjoint i64 %index, 3
+ %11 = getelementptr inbounds i32, ptr %dest, i64 %10
+ store i32 1, ptr %11, align 4
+ br label %pred.store.continue18
+
+pred.store.continue18:
+ %index.next = add i64 %index, 4
+ %vec.ind.next = add <4 x i64> %vec.ind, <i64 4, i64 4, i64 4, i64 4>
+ %24 = icmp eq i64 %index.next, 16
+ br i1 %24, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup:
+ ret void
+}
>From 97fcc447109b0872e2c071c06a3b461a960e63df Mon Sep 17 00:00:00 2001
From: David Sherwood <david.sherwood at arm.com>
Date: Thu, 24 Oct 2024 10:21:23 +0000
Subject: [PATCH 2/2] [CodeGen] Avoid sinking vector comparisons during
CodeGenPrepare
Whilst reviewing PR #109289 and doing some analysis with various
tests involving predicated blocks I noticed that we're making
codegen and performance worse by sinking vector comparisons
multiple times into blocks. It looks like the sinkCmpExpression
in CodeGenPrepare was written for scalar comparisons where there
is only a single condition register, whereas vector comparisons
typically produce a vector result. For some targets, such as NEON
or SVE, there are multiple allocatable vector registers that can
store the result and so we should avoid sinking in that case.
---
llvm/include/llvm/CodeGen/TargetLowering.h | 19 +++++
llvm/lib/CodeGen/CodeGenPrepare.cpp | 7 ++
llvm/lib/CodeGen/TargetLoweringBase.cpp | 1 +
.../Target/AArch64/AArch64ISelLowering.cpp | 3 +
.../CodeGen/AArch64/no-sink-vector-cmp.ll | 82 +++++++++----------
5 files changed, 69 insertions(+), 43 deletions(-)
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index 5ab31a687ec5e9..45c68747a4ec85 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -502,6 +502,11 @@ class TargetLoweringBase {
return HasMultipleConditionRegisters;
}
+ /// Return true if multiple vector predicate registers are available.
+ bool hasMultipleVectorPredicateRegisters() const {
+ return HasMultipleVectorPredicateRegisters;
+ }
+
/// Return true if the target has BitExtract instructions.
bool hasExtractBitsInsn() const { return HasExtractBitsInsn; }
@@ -2505,6 +2510,15 @@ class TargetLoweringBase {
HasMultipleConditionRegisters = hasManyRegs;
}
+ /// Tells the code generator that the target has multiple (allocatable)
+ /// vector predicate registers that can be used to store the results of
+ /// vector comparisons. With multiple predicate registers, the code
+ /// generator will not aggressively sink vector comparisons into the blocks
+ /// of their users.
+ void setHasMultipleVectorPredicateRegisters(bool hasManyRegs = true) {
+ HasMultipleVectorPredicateRegisters = hasManyRegs;
+ }
+
/// Tells the code generator that the target has BitExtract instructions.
/// The code generator will aggressively sink "shift"s into the blocks of
/// their users if the users will generate "and" instructions which can be
@@ -3477,6 +3491,11 @@ class TargetLoweringBase {
/// the blocks of their users.
bool HasMultipleConditionRegisters;
+ /// Tells the code generator that the target has multiple (allocatable)
+ /// vector predicate registers that can be used to store the results of
+ /// vector comparisons.
+ bool HasMultipleVectorPredicateRegisters;
+
/// Tells the code generator that the target has BitExtract instructions.
/// The code generator will aggressively sink "shift"s into the blocks of
/// their users if the users will generate "and" instructions which can be
diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
index 86f28293ba9ff8..af55e9273b4e16 100644
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -1775,6 +1775,13 @@ static bool sinkCmpExpression(CmpInst *Cmp, const TargetLowering &TLI) {
if (TLI.hasMultipleConditionRegisters())
return false;
+ // If this is a vector comparison the result may not depend upon setting a
+ // condition register, and if so it's probably better not to sink.
+ VectorType *VecType = dyn_cast<VectorType>(Cmp->getType());
+ if (VecType && VecType->getElementCount().isVector() &&
+ TLI.hasMultipleVectorPredicateRegisters())
+ return false;
+
// Avoid sinking soft-FP comparisons, since this can move them into a loop.
if (TLI.useSoftFloat() && isa<FCmpInst>(Cmp))
return false;
diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp
index 7a28f7892cbf31..ef23b1d1f99fae 100644
--- a/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -626,6 +626,7 @@ TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm)
MaxStoresPerMemsetOptSize = MaxStoresPerMemcpyOptSize =
MaxStoresPerMemmoveOptSize = MaxLoadsPerMemcmpOptSize = 4;
HasMultipleConditionRegisters = false;
+ HasMultipleVectorPredicateRegisters = false;
HasExtractBitsInsn = false;
JumpIsExpensive = JumpIsExpensiveOverride;
PredictableSelectIsExpensive = false;
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 5e5afdb7fa0a6c..7273d949c75780 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -473,6 +473,9 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
// Compute derived properties from the register classes
computeRegisterProperties(Subtarget->getRegisterInfo());
+ if (Subtarget->hasNEON() || Subtarget->hasSVE())
+ setHasMultipleVectorPredicateRegisters(true);
+
// Provide all sorts of operation actions
setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
diff --git a/llvm/test/CodeGen/AArch64/no-sink-vector-cmp.ll b/llvm/test/CodeGen/AArch64/no-sink-vector-cmp.ll
index c7e80b1c3dbb6f..93879d41a25432 100644
--- a/llvm/test/CodeGen/AArch64/no-sink-vector-cmp.ll
+++ b/llvm/test/CodeGen/AArch64/no-sink-vector-cmp.ll
@@ -6,68 +6,64 @@ target triple = "aarch64-unknown-linux-gnu"
define void @vector_loop_with_icmp(ptr nocapture noundef writeonly %dest) {
; CHECK-LABEL: vector_loop_with_icmp:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: mov w8, #15 // =0xf
+; CHECK-NEXT: mov w9, #15 // =0xf
; CHECK-NEXT: mov w10, #4 // =0x4
-; CHECK-NEXT: adrp x9, .LCPI0_0
+; CHECK-NEXT: adrp x8, .LCPI0_0
; CHECK-NEXT: adrp x11, .LCPI0_1
-; CHECK-NEXT: dup v0.2d, x8
+; CHECK-NEXT: dup v0.2d, x9
; CHECK-NEXT: dup v1.2d, x10
-; CHECK-NEXT: ldr q2, [x9, :lo12:.LCPI0_0]
+; CHECK-NEXT: ldr q2, [x8, :lo12:.LCPI0_0]
; CHECK-NEXT: ldr q3, [x11, :lo12:.LCPI0_1]
-; CHECK-NEXT: add x9, x0, #8
-; CHECK-NEXT: mov w10, #16 // =0x10
-; CHECK-NEXT: mov w11, #1 // =0x1
+; CHECK-NEXT: add x8, x0, #8
+; CHECK-NEXT: mov w9, #16 // =0x10
+; CHECK-NEXT: mov w10, #1 // =0x1
; CHECK-NEXT: b .LBB0_2
; CHECK-NEXT: .LBB0_1: // %pred.store.continue18
; CHECK-NEXT: // in Loop: Header=BB0_2 Depth=1
; CHECK-NEXT: add v2.2d, v2.2d, v1.2d
; CHECK-NEXT: add v3.2d, v3.2d, v1.2d
-; CHECK-NEXT: subs x10, x10, #4
-; CHECK-NEXT: add x9, x9, #16
+; CHECK-NEXT: subs x9, x9, #4
+; CHECK-NEXT: add x8, x8, #16
; CHECK-NEXT: b.eq .LBB0_10
; CHECK-NEXT: .LBB0_2: // %vector.body
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: cmhi v4.2d, v0.2d, v3.2d
-; CHECK-NEXT: xtn v4.2s, v4.2d
-; CHECK-NEXT: uzp1 v4.4h, v4.4h, v0.4h
-; CHECK-NEXT: umov w12, v4.h[0]
-; CHECK-NEXT: tbz w12, #0, .LBB0_4
-; CHECK-NEXT: // %bb.3: // %pred.store.if
+; CHECK-NEXT: cmhi v4.2d, v0.2d, v2.2d
+; CHECK-NEXT: cmhi v5.2d, v0.2d, v3.2d
+; CHECK-NEXT: uzp1 v4.4s, v5.4s, v4.4s
+; CHECK-NEXT: xtn v4.4h, v4.4s
+; CHECK-NEXT: umov w11, v4.h[0]
+; CHECK-NEXT: tbnz w11, #0, .LBB0_6
+; CHECK-NEXT: // %bb.3: // %pred.store.continue
; CHECK-NEXT: // in Loop: Header=BB0_2 Depth=1
-; CHECK-NEXT: stur w11, [x9, #-8]
-; CHECK-NEXT: .LBB0_4: // %pred.store.continue
+; CHECK-NEXT: umov w11, v4.h[1]
+; CHECK-NEXT: tbnz w11, #0, .LBB0_7
+; CHECK-NEXT: .LBB0_4: // %pred.store.continue6
; CHECK-NEXT: // in Loop: Header=BB0_2 Depth=1
-; CHECK-NEXT: dup v4.2d, x8
-; CHECK-NEXT: cmhi v4.2d, v4.2d, v3.2d
-; CHECK-NEXT: xtn v4.2s, v4.2d
-; CHECK-NEXT: uzp1 v4.4h, v4.4h, v0.4h
-; CHECK-NEXT: umov w12, v4.h[1]
-; CHECK-NEXT: tbz w12, #0, .LBB0_6
-; CHECK-NEXT: // %bb.5: // %pred.store.if5
+; CHECK-NEXT: umov w11, v4.h[2]
+; CHECK-NEXT: tbnz w11, #0, .LBB0_8
+; CHECK-NEXT: .LBB0_5: // %pred.store.continue8
; CHECK-NEXT: // in Loop: Header=BB0_2 Depth=1
-; CHECK-NEXT: stur w11, [x9, #-4]
-; CHECK-NEXT: .LBB0_6: // %pred.store.continue6
+; CHECK-NEXT: umov w11, v4.h[3]
+; CHECK-NEXT: tbz w11, #0, .LBB0_1
+; CHECK-NEXT: b .LBB0_9
+; CHECK-NEXT: .LBB0_6: // %pred.store.if
; CHECK-NEXT: // in Loop: Header=BB0_2 Depth=1
-; CHECK-NEXT: dup v4.2d, x8
-; CHECK-NEXT: cmhi v4.2d, v4.2d, v2.2d
-; CHECK-NEXT: xtn v4.2s, v4.2d
-; CHECK-NEXT: uzp1 v4.4h, v0.4h, v4.4h
-; CHECK-NEXT: umov w12, v4.h[2]
-; CHECK-NEXT: tbz w12, #0, .LBB0_8
-; CHECK-NEXT: // %bb.7: // %pred.store.if7
+; CHECK-NEXT: stur w10, [x8, #-8]
+; CHECK-NEXT: umov w11, v4.h[1]
+; CHECK-NEXT: tbz w11, #0, .LBB0_4
+; CHECK-NEXT: .LBB0_7: // %pred.store.if5
; CHECK-NEXT: // in Loop: Header=BB0_2 Depth=1
-; CHECK-NEXT: str w11, [x9]
-; CHECK-NEXT: .LBB0_8: // %pred.store.continue8
+; CHECK-NEXT: stur w10, [x8, #-4]
+; CHECK-NEXT: umov w11, v4.h[2]
+; CHECK-NEXT: tbz w11, #0, .LBB0_5
+; CHECK-NEXT: .LBB0_8: // %pred.store.if7
; CHECK-NEXT: // in Loop: Header=BB0_2 Depth=1
-; CHECK-NEXT: dup v4.2d, x8
-; CHECK-NEXT: cmhi v4.2d, v4.2d, v2.2d
-; CHECK-NEXT: xtn v4.2s, v4.2d
-; CHECK-NEXT: uzp1 v4.4h, v0.4h, v4.4h
-; CHECK-NEXT: umov w12, v4.h[3]
-; CHECK-NEXT: tbz w12, #0, .LBB0_1
-; CHECK-NEXT: // %bb.9: // %pred.store.if9
+; CHECK-NEXT: str w10, [x8]
+; CHECK-NEXT: umov w11, v4.h[3]
+; CHECK-NEXT: tbz w11, #0, .LBB0_1
+; CHECK-NEXT: .LBB0_9: // %pred.store.if9
; CHECK-NEXT: // in Loop: Header=BB0_2 Depth=1
-; CHECK-NEXT: str w11, [x9, #4]
+; CHECK-NEXT: str w10, [x8, #4]
; CHECK-NEXT: b .LBB0_1
; CHECK-NEXT: .LBB0_10: // %for.cond.cleanup
; CHECK-NEXT: ret
More information about the llvm-commits
mailing list