[llvm] [CodeGen] Avoid sinking vector comparisons during CodeGenPrepare (PR #113158)
via llvm-commits
llvm-commits at lists.llvm.org
Mon Oct 21 05:06:46 PDT 2024
llvmbot wrote:
@llvm/pr-subscribers-backend-aarch64
Author: David Sherwood (david-arm)
Changes:
Whilst reviewing PR #109289 and doing some analysis with various
tests involving predicated blocks, I noticed that we make codegen
and performance worse by sinking vector comparisons multiple times
into user blocks. It looks like sinkCmpExpression in CodeGenPrepare
was written for scalar comparisons, where there is only a single
condition register and sinking keeps its live range short, whereas
vector comparisons produce their results in ordinary vector
registers, where pressure is much lower. Given that vector
comparisons are also likely to be more expensive than scalar ones,
it makes sense to avoid sinking too many copies of them. The
CodeGen/SystemZ/vec-perm-14.ll test does rely upon sinking a vector
comparison, so I've kept that behaviour by permitting a single sink.
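
To make the gating concrete, here is a minimal standalone sketch of
the new bail-out (the real logic lives in sinkCmpExpression in the
diff below); the helper name `worthSinkingCmp` is mine for
illustration and is not part of the patch:

```c++
// Minimal sketch, assuming the usual LLVM headers; not the patch itself.
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Returns false when sinking should be skipped: the compare produces a
// vector result (so no single condition register is tied up) and it would
// have to be duplicated into more than one user block.
static bool worthSinkingCmp(const CmpInst *Cmp, unsigned NumOutOfBlockUsers) {
  auto *VecTy = dyn_cast<VectorType>(Cmp->getType());
  return !(VecTy && VecTy->getElementCount().isVector() &&
           NumOutOfBlockUsers > 1);
}
```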
Alternatively, I could introduce a TLI hook so that each target can
decide for itself, if that would be the preferred solution.
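
For reference, a hedged sketch of what such a hook might look like;
the name `shouldSinkVectorCmp`, its signature, and the default below
are invented for illustration and do not exist in TargetLowering:

```c++
// Hypothetical sketch of the TLI-hook alternative; nothing here is in
// the patch or in the existing TargetLowering API.
#include <cstdio>

struct MockTargetLowering {
  // A target with cheap vector compares could override this to tolerate
  // more duplication; the default mirrors the patch's one-sink limit.
  virtual bool shouldSinkVectorCmp(unsigned NumSinks) const {
    return NumSinks <= 1;
  }
  virtual ~MockTargetLowering() = default;
};

int main() {
  MockTargetLowering TLI;
  printf("1 sink allowed: %d\n", TLI.shouldSinkVectorCmp(1));  // prints 1
  printf("3 sinks allowed: %d\n", TLI.shouldSinkVectorCmp(3)); // prints 0
}
```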
---
Patch is 42.92 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/113158.diff
4 Files Affected:
- (modified) llvm/lib/CodeGen/CodeGenPrepare.cpp (+32-21)
- (added) llvm/test/CodeGen/AArch64/no-sink-vector-cmp.ll (+123)
- (modified) llvm/test/CodeGen/Thumb2/LowOverheadLoops/fast-fp-loops.ll (+75-79)
- (modified) llvm/test/CodeGen/X86/masked_gather.ll (+150-169)
``````````diff
diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
index 86f28293ba9ff8..80ace20bfc67ab 100644
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -1779,29 +1779,35 @@ static bool sinkCmpExpression(CmpInst *Cmp, const TargetLowering &TLI) {
if (TLI.useSoftFloat() && isa<FCmpInst>(Cmp))
return false;
- // Only insert a cmp in each block once.
- DenseMap<BasicBlock *, CmpInst *> InsertedCmps;
+  // Collect a list of non-phi users that are in blocks different from the
+  // definition block.
+ BasicBlock *DefBB = Cmp->getParent();
+ SmallSet<User *, 4> Users;
+ for (auto *U : Cmp->users()) {
+ Instruction *User = cast<Instruction>(U);
+ if (isa<PHINode>(User))
+ continue;
- bool MadeChange = false;
- for (Value::user_iterator UI = Cmp->user_begin(), E = Cmp->user_end();
- UI != E;) {
- Use &TheUse = UI.getUse();
- Instruction *User = cast<Instruction>(*UI);
+ if (User->getParent() == DefBB)
+ continue;
- // Preincrement use iterator so we don't invalidate it.
- ++UI;
+ Users.insert(User);
+ }
- // Don't bother for PHI nodes.
- if (isa<PHINode>(User))
- continue;
+ // If this is a vector comparison the result will likely not depend upon
+ // setting a condition register, and it's probably too expensive to sink too
+ // many times.
+ VectorType *VecType = dyn_cast<VectorType>(Cmp->getType());
+ if (VecType && VecType->getElementCount().isVector() && Users.size() > 1)
+ return false;
- // Figure out which BB this cmp is used in.
- BasicBlock *UserBB = User->getParent();
- BasicBlock *DefBB = Cmp->getParent();
+ // Only insert a cmp in each block once.
+ DenseMap<BasicBlock *, CmpInst *> InsertedCmps;
- // If this user is in the same block as the cmp, don't change the cmp.
- if (UserBB == DefBB)
- continue;
+ bool MadeChange = false;
+ for (auto *U : Users) {
+ Instruction *UI = cast<Instruction>(U);
+ BasicBlock *UserBB = UI->getParent();
// If we have already inserted a cmp into this block, use it.
CmpInst *&InsertedCmp = InsertedCmps[UserBB];
@@ -1816,10 +1822,15 @@ static bool sinkCmpExpression(CmpInst *Cmp, const TargetLowering &TLI) {
InsertedCmp->setDebugLoc(Cmp->getDebugLoc());
}
- // Replace a use of the cmp with a use of the new cmp.
- TheUse = InsertedCmp;
+ // Replace all uses of the cmp with a use of the new cmp and update the
+ // number of uses.
+ for (unsigned I = 0; I < U->getNumOperands(); I++)
+ if (U->getOperand(I) == Cmp) {
+ U->setOperand(I, InsertedCmp);
+ NumCmpUses++;
+ }
+
MadeChange = true;
- ++NumCmpUses;
}
// If we removed all uses, nuke the cmp.
diff --git a/llvm/test/CodeGen/AArch64/no-sink-vector-cmp.ll b/llvm/test/CodeGen/AArch64/no-sink-vector-cmp.ll
new file mode 100644
index 00000000000000..93879d41a25432
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/no-sink-vector-cmp.ll
@@ -0,0 +1,123 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s | FileCheck %s
+
+target triple = "aarch64-unknown-linux-gnu"
+
+define void @vector_loop_with_icmp(ptr nocapture noundef writeonly %dest) {
+; CHECK-LABEL: vector_loop_with_icmp:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w9, #15 // =0xf
+; CHECK-NEXT: mov w10, #4 // =0x4
+; CHECK-NEXT: adrp x8, .LCPI0_0
+; CHECK-NEXT: adrp x11, .LCPI0_1
+; CHECK-NEXT: dup v0.2d, x9
+; CHECK-NEXT: dup v1.2d, x10
+; CHECK-NEXT: ldr q2, [x8, :lo12:.LCPI0_0]
+; CHECK-NEXT: ldr q3, [x11, :lo12:.LCPI0_1]
+; CHECK-NEXT: add x8, x0, #8
+; CHECK-NEXT: mov w9, #16 // =0x10
+; CHECK-NEXT: mov w10, #1 // =0x1
+; CHECK-NEXT: b .LBB0_2
+; CHECK-NEXT: .LBB0_1: // %pred.store.continue18
+; CHECK-NEXT: // in Loop: Header=BB0_2 Depth=1
+; CHECK-NEXT: add v2.2d, v2.2d, v1.2d
+; CHECK-NEXT: add v3.2d, v3.2d, v1.2d
+; CHECK-NEXT: subs x9, x9, #4
+; CHECK-NEXT: add x8, x8, #16
+; CHECK-NEXT: b.eq .LBB0_10
+; CHECK-NEXT: .LBB0_2: // %vector.body
+; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: cmhi v4.2d, v0.2d, v2.2d
+; CHECK-NEXT: cmhi v5.2d, v0.2d, v3.2d
+; CHECK-NEXT: uzp1 v4.4s, v5.4s, v4.4s
+; CHECK-NEXT: xtn v4.4h, v4.4s
+; CHECK-NEXT: umov w11, v4.h[0]
+; CHECK-NEXT: tbnz w11, #0, .LBB0_6
+; CHECK-NEXT: // %bb.3: // %pred.store.continue
+; CHECK-NEXT: // in Loop: Header=BB0_2 Depth=1
+; CHECK-NEXT: umov w11, v4.h[1]
+; CHECK-NEXT: tbnz w11, #0, .LBB0_7
+; CHECK-NEXT: .LBB0_4: // %pred.store.continue6
+; CHECK-NEXT: // in Loop: Header=BB0_2 Depth=1
+; CHECK-NEXT: umov w11, v4.h[2]
+; CHECK-NEXT: tbnz w11, #0, .LBB0_8
+; CHECK-NEXT: .LBB0_5: // %pred.store.continue8
+; CHECK-NEXT: // in Loop: Header=BB0_2 Depth=1
+; CHECK-NEXT: umov w11, v4.h[3]
+; CHECK-NEXT: tbz w11, #0, .LBB0_1
+; CHECK-NEXT: b .LBB0_9
+; CHECK-NEXT: .LBB0_6: // %pred.store.if
+; CHECK-NEXT: // in Loop: Header=BB0_2 Depth=1
+; CHECK-NEXT: stur w10, [x8, #-8]
+; CHECK-NEXT: umov w11, v4.h[1]
+; CHECK-NEXT: tbz w11, #0, .LBB0_4
+; CHECK-NEXT: .LBB0_7: // %pred.store.if5
+; CHECK-NEXT: // in Loop: Header=BB0_2 Depth=1
+; CHECK-NEXT: stur w10, [x8, #-4]
+; CHECK-NEXT: umov w11, v4.h[2]
+; CHECK-NEXT: tbz w11, #0, .LBB0_5
+; CHECK-NEXT: .LBB0_8: // %pred.store.if7
+; CHECK-NEXT: // in Loop: Header=BB0_2 Depth=1
+; CHECK-NEXT: str w10, [x8]
+; CHECK-NEXT: umov w11, v4.h[3]
+; CHECK-NEXT: tbz w11, #0, .LBB0_1
+; CHECK-NEXT: .LBB0_9: // %pred.store.if9
+; CHECK-NEXT: // in Loop: Header=BB0_2 Depth=1
+; CHECK-NEXT: str w10, [x8, #4]
+; CHECK-NEXT: b .LBB0_1
+; CHECK-NEXT: .LBB0_10: // %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ br label %vector.body
+
+vector.body:
+ %index = phi i64 [ 0, %entry ], [ %index.next, %pred.store.continue18 ]
+ %vec.ind = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %entry ], [ %vec.ind.next, %pred.store.continue18 ]
+ %0 = icmp ult <4 x i64> %vec.ind, <i64 15, i64 15, i64 15, i64 15>
+ %1 = extractelement <4 x i1> %0, i64 0
+ br i1 %1, label %pred.store.if, label %pred.store.continue
+
+pred.store.if:
+ %2 = getelementptr inbounds i32, ptr %dest, i64 %index
+ store i32 1, ptr %2, align 4
+ br label %pred.store.continue
+
+pred.store.continue:
+ %3 = extractelement <4 x i1> %0, i64 1
+ br i1 %3, label %pred.store.if5, label %pred.store.continue6
+
+pred.store.if5:
+ %4 = or disjoint i64 %index, 1
+ %5 = getelementptr inbounds i32, ptr %dest, i64 %4
+ store i32 1, ptr %5, align 4
+ br label %pred.store.continue6
+
+pred.store.continue6:
+ %6 = extractelement <4 x i1> %0, i64 2
+ br i1 %6, label %pred.store.if7, label %pred.store.continue8
+
+pred.store.if7:
+ %7 = or disjoint i64 %index, 2
+ %8 = getelementptr inbounds i32, ptr %dest, i64 %7
+ store i32 1, ptr %8, align 4
+ br label %pred.store.continue8
+
+pred.store.continue8:
+ %9 = extractelement <4 x i1> %0, i64 3
+ br i1 %9, label %pred.store.if9, label %pred.store.continue18
+
+pred.store.if9:
+ %10 = or disjoint i64 %index, 3
+ %11 = getelementptr inbounds i32, ptr %dest, i64 %10
+ store i32 1, ptr %11, align 4
+ br label %pred.store.continue18
+
+pred.store.continue18:
+ %index.next = add i64 %index, 4
+ %vec.ind.next = add <4 x i64> %vec.ind, <i64 4, i64 4, i64 4, i64 4>
+ %24 = icmp eq i64 %index.next, 16
+ br i1 %24, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup:
+ ret void
+}
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/fast-fp-loops.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/fast-fp-loops.ll
index 2fdf534d526565..b9f1e2d21674f3 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/fast-fp-loops.ll
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/fast-fp-loops.ll
@@ -280,160 +280,156 @@ define arm_aapcs_vfpcc float @fast_float_half_mac(ptr nocapture readonly %b, ptr
; CHECK-NEXT: bxeq lr
; CHECK-NEXT: .LBB2_1: @ %vector.ph
; CHECK-NEXT: push {r4, r5, r7, lr}
-; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13}
+; CHECK-NEXT: vpush {d8, d9}
; CHECK-NEXT: sub sp, #8
; CHECK-NEXT: adds r3, r2, #3
-; CHECK-NEXT: vmov.i32 q5, #0x0
+; CHECK-NEXT: vmov.i32 q3, #0x0
; CHECK-NEXT: bic r3, r3, #3
+; CHECK-NEXT: movs r5, #0
; CHECK-NEXT: sub.w r12, r3, #4
; CHECK-NEXT: movs r3, #1
; CHECK-NEXT: add.w lr, r3, r12, lsr #2
+; CHECK-NEXT: adr r3, .LCPI2_1
; CHECK-NEXT: sub.w r12, r2, #1
-; CHECK-NEXT: adr r2, .LCPI2_1
; CHECK-NEXT: mov lr, lr
-; CHECK-NEXT: vldrw.u32 q0, [r2]
-; CHECK-NEXT: movs r3, #0
+; CHECK-NEXT: vldrw.u32 q0, [r3]
; CHECK-NEXT: vdup.32 q1, r12
-; CHECK-NEXT: vdup.32 q2, r12
; CHECK-NEXT: b .LBB2_3
; CHECK-NEXT: .LBB2_2: @ %else24
; CHECK-NEXT: @ in Loop: Header=BB2_3 Depth=1
-; CHECK-NEXT: vmul.f16 q5, q6, q5
+; CHECK-NEXT: vmul.f16 q3, q4, q3
; CHECK-NEXT: adds r0, #8
-; CHECK-NEXT: vcvtt.f32.f16 s23, s21
-; CHECK-NEXT: vcvtb.f32.f16 s22, s21
-; CHECK-NEXT: vcvtt.f32.f16 s21, s20
-; CHECK-NEXT: vcvtb.f32.f16 s20, s20
+; CHECK-NEXT: vcvtt.f32.f16 s15, s13
+; CHECK-NEXT: vcvtb.f32.f16 s14, s13
+; CHECK-NEXT: vcvtt.f32.f16 s13, s12
+; CHECK-NEXT: vcvtb.f32.f16 s12, s12
; CHECK-NEXT: adds r1, #8
-; CHECK-NEXT: adds r3, #4
-; CHECK-NEXT: vadd.f32 q5, q3, q5
+; CHECK-NEXT: adds r5, #4
+; CHECK-NEXT: vadd.f32 q3, q2, q3
; CHECK-NEXT: subs.w lr, lr, #1
; CHECK-NEXT: bne .LBB2_3
; CHECK-NEXT: b .LBB2_19
; CHECK-NEXT: .LBB2_3: @ %vector.body
; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: vadd.i32 q4, q0, r3
-; CHECK-NEXT: vmov q3, q5
-; CHECK-NEXT: vcmp.u32 cs, q1, q4
-; CHECK-NEXT: @ implicit-def: $q5
-; CHECK-NEXT: vmrs r4, p0
-; CHECK-NEXT: and r2, r4, #1
-; CHECK-NEXT: rsbs r5, r2, #0
-; CHECK-NEXT: movs r2, #0
-; CHECK-NEXT: bfi r2, r5, #0, #1
-; CHECK-NEXT: ubfx r5, r4, #4, #1
-; CHECK-NEXT: rsbs r5, r5, #0
-; CHECK-NEXT: bfi r2, r5, #1, #1
-; CHECK-NEXT: ubfx r5, r4, #8, #1
-; CHECK-NEXT: ubfx r4, r4, #12, #1
-; CHECK-NEXT: rsbs r5, r5, #0
-; CHECK-NEXT: bfi r2, r5, #2, #1
+; CHECK-NEXT: vmov q2, q3
+; CHECK-NEXT: vadd.i32 q3, q0, r5
+; CHECK-NEXT: vcmp.u32 cs, q1, q3
+; CHECK-NEXT: @ implicit-def: $q3
+; CHECK-NEXT: vmrs r2, p0
+; CHECK-NEXT: and r3, r2, #1
+; CHECK-NEXT: rsbs r4, r3, #0
+; CHECK-NEXT: movs r3, #0
+; CHECK-NEXT: bfi r3, r4, #0, #1
+; CHECK-NEXT: ubfx r4, r2, #4, #1
; CHECK-NEXT: rsbs r4, r4, #0
-; CHECK-NEXT: bfi r2, r4, #3, #1
-; CHECK-NEXT: lsls r4, r2, #31
+; CHECK-NEXT: bfi r3, r4, #1, #1
+; CHECK-NEXT: ubfx r4, r2, #8, #1
+; CHECK-NEXT: ubfx r2, r2, #12, #1
+; CHECK-NEXT: rsbs r4, r4, #0
+; CHECK-NEXT: bfi r3, r4, #2, #1
+; CHECK-NEXT: rsbs r2, r2, #0
+; CHECK-NEXT: bfi r3, r2, #3, #1
+; CHECK-NEXT: lsls r2, r3, #31
; CHECK-NEXT: bne .LBB2_12
; CHECK-NEXT: @ %bb.4: @ %else
; CHECK-NEXT: @ in Loop: Header=BB2_3 Depth=1
-; CHECK-NEXT: lsls r4, r2, #30
+; CHECK-NEXT: lsls r2, r3, #30
; CHECK-NEXT: bmi .LBB2_13
; CHECK-NEXT: .LBB2_5: @ %else5
; CHECK-NEXT: @ in Loop: Header=BB2_3 Depth=1
-; CHECK-NEXT: lsls r4, r2, #29
+; CHECK-NEXT: lsls r2, r3, #29
; CHECK-NEXT: bmi .LBB2_14
; CHECK-NEXT: .LBB2_6: @ %else8
; CHECK-NEXT: @ in Loop: Header=BB2_3 Depth=1
-; CHECK-NEXT: lsls r2, r2, #28
+; CHECK-NEXT: lsls r2, r3, #28
; CHECK-NEXT: bpl .LBB2_8
; CHECK-NEXT: .LBB2_7: @ %cond.load10
; CHECK-NEXT: @ in Loop: Header=BB2_3 Depth=1
-; CHECK-NEXT: vldr.16 s22, [r0, #6]
-; CHECK-NEXT: vins.f16 s21, s22
+; CHECK-NEXT: vldr.16 s14, [r0, #6]
+; CHECK-NEXT: vins.f16 s13, s14
; CHECK-NEXT: .LBB2_8: @ %else11
; CHECK-NEXT: @ in Loop: Header=BB2_3 Depth=1
-; CHECK-NEXT: vcmp.u32 cs, q2, q4
-; CHECK-NEXT: @ implicit-def: $q6
-; CHECK-NEXT: vmrs r4, p0
-; CHECK-NEXT: and r2, r4, #1
-; CHECK-NEXT: rsbs r5, r2, #0
-; CHECK-NEXT: movs r2, #0
-; CHECK-NEXT: bfi r2, r5, #0, #1
-; CHECK-NEXT: ubfx r5, r4, #4, #1
-; CHECK-NEXT: rsbs r5, r5, #0
-; CHECK-NEXT: bfi r2, r5, #1, #1
-; CHECK-NEXT: ubfx r5, r4, #8, #1
-; CHECK-NEXT: ubfx r4, r4, #12, #1
-; CHECK-NEXT: rsbs r5, r5, #0
-; CHECK-NEXT: bfi r2, r5, #2, #1
+; CHECK-NEXT: vmrs r2, p0
+; CHECK-NEXT: @ implicit-def: $q4
+; CHECK-NEXT: and r3, r2, #1
+; CHECK-NEXT: rsbs r4, r3, #0
+; CHECK-NEXT: movs r3, #0
+; CHECK-NEXT: bfi r3, r4, #0, #1
+; CHECK-NEXT: ubfx r4, r2, #4, #1
+; CHECK-NEXT: rsbs r4, r4, #0
+; CHECK-NEXT: bfi r3, r4, #1, #1
+; CHECK-NEXT: ubfx r4, r2, #8, #1
+; CHECK-NEXT: ubfx r2, r2, #12, #1
; CHECK-NEXT: rsbs r4, r4, #0
-; CHECK-NEXT: bfi r2, r4, #3, #1
-; CHECK-NEXT: lsls r4, r2, #31
+; CHECK-NEXT: bfi r3, r4, #2, #1
+; CHECK-NEXT: rsbs r2, r2, #0
+; CHECK-NEXT: bfi r3, r2, #3, #1
+; CHECK-NEXT: lsls r2, r3, #31
; CHECK-NEXT: bne .LBB2_15
; CHECK-NEXT: @ %bb.9: @ %else15
; CHECK-NEXT: @ in Loop: Header=BB2_3 Depth=1
-; CHECK-NEXT: lsls r4, r2, #30
+; CHECK-NEXT: lsls r2, r3, #30
; CHECK-NEXT: bmi .LBB2_16
; CHECK-NEXT: .LBB2_10: @ %else18
; CHECK-NEXT: @ in Loop: Header=BB2_3 Depth=1
-; CHECK-NEXT: lsls r4, r2, #29
+; CHECK-NEXT: lsls r2, r3, #29
; CHECK-NEXT: bmi .LBB2_17
; CHECK-NEXT: .LBB2_11: @ %else21
; CHECK-NEXT: @ in Loop: Header=BB2_3 Depth=1
-; CHECK-NEXT: lsls r2, r2, #28
+; CHECK-NEXT: lsls r2, r3, #28
; CHECK-NEXT: bpl .LBB2_2
; CHECK-NEXT: b .LBB2_18
; CHECK-NEXT: .LBB2_12: @ %cond.load
; CHECK-NEXT: @ in Loop: Header=BB2_3 Depth=1
-; CHECK-NEXT: vldr.16 s20, [r0]
-; CHECK-NEXT: lsls r4, r2, #30
+; CHECK-NEXT: vldr.16 s12, [r0]
+; CHECK-NEXT: lsls r2, r3, #30
; CHECK-NEXT: bpl .LBB2_5
; CHECK-NEXT: .LBB2_13: @ %cond.load4
; CHECK-NEXT: @ in Loop: Header=BB2_3 Depth=1
-; CHECK-NEXT: vldr.16 s22, [r0, #2]
-; CHECK-NEXT: vins.f16 s20, s22
-; CHECK-NEXT: lsls r4, r2, #29
+; CHECK-NEXT: vldr.16 s14, [r0, #2]
+; CHECK-NEXT: vins.f16 s12, s14
+; CHECK-NEXT: lsls r2, r3, #29
; CHECK-NEXT: bpl .LBB2_6
; CHECK-NEXT: .LBB2_14: @ %cond.load7
; CHECK-NEXT: @ in Loop: Header=BB2_3 Depth=1
-; CHECK-NEXT: vldr.16 s21, [r0, #4]
-; CHECK-NEXT: vmovx.f16 s22, s0
-; CHECK-NEXT: vins.f16 s21, s22
-; CHECK-NEXT: lsls r2, r2, #28
+; CHECK-NEXT: vldr.16 s13, [r0, #4]
+; CHECK-NEXT: vmovx.f16 s14, s0
+; CHECK-NEXT: vins.f16 s13, s14
+; CHECK-NEXT: lsls r2, r3, #28
; CHECK-NEXT: bmi .LBB2_7
; CHECK-NEXT: b .LBB2_8
; CHECK-NEXT: .LBB2_15: @ %cond.load14
; CHECK-NEXT: @ in Loop: Header=BB2_3 Depth=1
-; CHECK-NEXT: vldr.16 s24, [r1]
-; CHECK-NEXT: lsls r4, r2, #30
+; CHECK-NEXT: vldr.16 s16, [r1]
+; CHECK-NEXT: lsls r2, r3, #30
; CHECK-NEXT: bpl .LBB2_10
; CHECK-NEXT: .LBB2_16: @ %cond.load17
; CHECK-NEXT: @ in Loop: Header=BB2_3 Depth=1
-; CHECK-NEXT: vldr.16 s26, [r1, #2]
-; CHECK-NEXT: vins.f16 s24, s26
-; CHECK-NEXT: lsls r4, r2, #29
+; CHECK-NEXT: vldr.16 s18, [r1, #2]
+; CHECK-NEXT: vins.f16 s16, s18
+; CHECK-NEXT: lsls r2, r3, #29
; CHECK-NEXT: bpl .LBB2_11
; CHECK-NEXT: .LBB2_17: @ %cond.load20
; CHECK-NEXT: @ in Loop: Header=BB2_3 Depth=1
-; CHECK-NEXT: vldr.16 s25, [r1, #4]
-; CHECK-NEXT: vmovx.f16 s26, s0
-; CHECK-NEXT: vins.f16 s25, s26
-; CHECK-NEXT: lsls r2, r2, #28
+; CHECK-NEXT: vldr.16 s17, [r1, #4]
+; CHECK-NEXT: vmovx.f16 s18, s0
+; CHECK-NEXT: vins.f16 s17, s18
+; CHECK-NEXT: lsls r2, r3, #28
; CHECK-NEXT: bpl.w .LBB2_2
; CHECK-NEXT: .LBB2_18: @ %cond.load23
; CHECK-NEXT: @ in Loop: Header=BB2_3 Depth=1
-; CHECK-NEXT: vldr.16 s26, [r1, #6]
-; CHECK-NEXT: vins.f16 s25, s26
+; CHECK-NEXT: vldr.16 s18, [r1, #6]
+; CHECK-NEXT: vins.f16 s17, s18
; CHECK-NEXT: b .LBB2_2
; CHECK-NEXT: .LBB2_19: @ %middle.block
-; CHECK-NEXT: vdup.32 q0, r12
-; CHECK-NEXT: vcmp.u32 cs, q0, q4
-; CHECK-NEXT: vpsel q0, q5, q3
+; CHECK-NEXT: vpsel q0, q3, q2
; CHECK-NEXT: vmov.f32 s4, s2
; CHECK-NEXT: vmov.f32 s5, s3
; CHECK-NEXT: vadd.f32 q0, q0, q1
; CHECK-NEXT: vmov r0, s1
; CHECK-NEXT: vadd.f32 q0, q0, r0
; CHECK-NEXT: add sp, #8
-; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13}
+; CHECK-NEXT: vpop {d8, d9}
; CHECK-NEXT: pop {r4, r5, r7, pc}
; CHECK-NEXT: .p2align 4
; CHECK-NEXT: @ %bb.20:
diff --git a/llvm/test/CodeGen/X86/masked_gather.ll b/llvm/test/CodeGen/X86/masked_gather.ll
index 559a7ec0930b99..c3f46da299c2f8 100644
--- a/llvm/test/CodeGen/X86/masked_gather.ll
+++ b/llvm/test/CodeGen/X86/masked_gather.ll
@@ -1137,12 +1137,12 @@ define <16 x i8> @gather_v16i8_v16i32_v16i8(ptr %base, <16 x i32> %idx, <16 x i8
define <8 x i32> @gather_v8i32_v8i32(<8 x i32> %trigger) {
; SSE-LABEL: gather_v8i32_v8i32:
; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm1, %xmm3
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: pxor %xmm0, %xmm0
; SSE-NEXT: pcmpeqd %xmm0, %xmm1
-; SSE-NEXT: pcmpeqd %xmm2, %xmm0
-; SSE-NEXT: packssdw %xmm1, %xmm0
+; SSE-NEXT: pcmpeqd %xmm0, %xmm2
+; SSE-NEXT: packssdw %xmm1, %xmm2
+; SSE-NEXT: movdqa %xmm2, %xmm0
; SSE-NEXT: packsswb %xmm0, %xmm0
; SSE-NEXT: pmovmskb %xmm0, %eax
; SSE-NEXT: testb $1, %al
@@ -1195,17 +1195,13 @@ define <8 x i32> @gather_v8i32_v8i32(<8 x i32> %trigger) {
; SSE-NEXT: .LBB4_17: # %cond.load19
; SSE-NEXT: pinsrd $3, c+12(%rip), %xmm1
; SSE-NEXT: .LBB4_18: # %else20
-; SSE-NEXT: pxor %xmm4, %xmm4
-; SSE-NEXT: movdqa %xmm2, %xmm5
-; SSE-NEXT: pcmpeqd %xmm4, %xmm5
-; SSE-NEXT: pcmpeqd %xmm3, %xmm4
-; SSE-NEXT: packssdw %xmm4, %xmm5
-; SSE-NEXT: packsswb %xmm5, %xmm5
-; SSE-NEXT: pmovmskb %xmm5, %eax
+; SSE-NEXT: movdqa %xmm2, %xmm3
+; SSE-NEXT: packsswb %xmm3, %xmm3
+; SSE-NEXT: pmovmskb %xmm3, %eax
; SSE-NEXT: testb $1, %al
; SSE-NEXT: je .LBB4_19
; SSE-NEXT: # %bb.20: # %cond.load23
-; SSE-NEXT: movd {{.*#+}} xmm4 = mem[0],zero,zero,zero
+; SSE-NEXT: movd {{.*#+}} xmm3 = mem[0],zero,zero,zero
; SSE-NEXT: testb $2, %al
; SSE-NEXT: jne .LBB4_22
; SSE-NEXT: jmp .LBB4_23
@@ -1215,11 +1211,11 @@ define <8 x i32> @gather_v8i32_v8i32(<8 x i32> %trigger) {
; SSE-NEXT: jne .LBB4_17
; SSE-NEXT: jmp .LBB4_18
; SSE-NEXT: .LBB4_19:
-; SSE-NEXT: # implicit-def: $xmm4
+; SSE-NEXT: # implicit-def: $xmm3
; SSE-NEXT: testb $2, %al
; SSE-NEXT: je .LBB4_23
; SSE-NEXT: .LBB4_22: # %cond.load28
-; SSE-NEXT: pinsrd $1, c+28(%rip), %xmm4
+; SSE-NEXT: pinsrd $1, c+28(%rip), %xmm3
; SSE-NEXT: .LBB4_23: # %else31
; SSE-NEXT: testb $4, %al
; SSE-NEXT: jne .LBB4_24
@@ -1230,24 +1226,24 @@ define <8 x i32> @gather_v8i32_v8i32(<8 x i32> %trigger) {
; SSE-NEXT: testb $16, %al
; SSE-NEXT: je .LBB4_28
; SSE-NEXT: .LBB4_29: # %cond.load43
-; SSE-NEXT: pinsrd $0, c+28(%rip), %xmm5
+; SSE-NEXT: pinsrd $0, c+28(%rip), %xmm4
; SSE-NEXT: testb $32, %al
; SSE-NEXT: jne .LBB4_31
; SSE-NEXT: jmp .LBB4_32
; SSE-NEXT: .LBB4_24: # %cond.load33
-; SSE-NEXT: pinsrd $2, c+28(%rip), %xmm4
+; SSE-NEXT: pinsrd $2, c+28(%rip), %xmm3
; SSE-NEXT: testb $8, %al
; SSE-NEXT: je .LBB4_27
; SSE-NEXT: .LBB4_26: # %cond.load38
-; SSE-NEXT: pinsrd $3, c+28(%rip), %xmm4
+; SSE-NEXT: pinsrd $3, c+28(%rip), %xmm3
; SSE-NEXT: testb $16, %al
; SSE-NEXT: jne .LBB4_29
; SSE-NEXT: .LBB4_28:
-; SSE-NEXT: # implicit-def: $xmm5
+; SSE-NEXT: # implicit-def: $xmm4
; SSE-NEXT: testb $32, %al
; SSE-NEXT: je .LBB4_32
; SSE-NEXT: .LBB4_31: # %cond.load48
-; SSE-NEXT: pinsrd $1, c+28(%rip), %xmm5
+; SSE-NEXT: pinsrd $1, c+28(%rip), %xmm4
; SSE-NEXT: .LBB4_32: # %else51
; SSE-NEXT: testb $64, %al
; SSE-NEXT: jne .LBB4_33
@@ -1255,12 +1251,8 @@ define <8 x i32> @gather_v8i32_v8i32(<8 x i32> %trigger) {
; SSE-NEXT: testb $-128, %al
; SSE-NEXT: je .LBB4_36
; SSE-NEXT: .LBB4_35: # %cond.load58
-; SSE-NEXT: pinsrd $3, c+28(%rip), %xmm5
+; SSE-NEXT: pinsrd $3, c+28(%rip), %xmm4
; SSE-NEXT: .LBB4_36: # %else61
-; SSE-NEXT: pxor %xmm6, %xmm6
-; SSE-NEXT: pcmpeqd %xmm6, %xmm2
-; SSE-NEXT: pcmpeqd %xmm6, %xmm3
-; SSE-NEXT: packssdw %xmm3, %xmm2
; SSE-NEXT: packsswb %xmm2, %xmm2
; SSE-NEXT: ...
[truncated]
``````````
https://github.com/llvm/llvm-project/pull/113158
More information about the llvm-commits mailing list