[llvm] [CodeGen] Avoid sinking vector comparisons during CodeGenPrepare (PR #113158)
David Sherwood via llvm-commits
llvm-commits at lists.llvm.org
Mon Oct 21 05:06:12 PDT 2024
https://github.com/david-arm created https://github.com/llvm/llvm-project/pull/113158
Whilst reviewing PR #109289 and doing some analysis with various
tests involving predicated blocks, I noticed that we're making
codegen and performance worse by sinking vector comparisons into
blocks multiple times. It looks like sinkCmpExpression in
CodeGenPrepare was written for scalar comparisons, where there is
only a single condition register, whereas vector comparisons
typically produce a vector result, for which register pressure is
much less of a concern. Given that vector comparisons are likely to
be more expensive than scalar ones, it makes sense to avoid sinking
(and hence duplicating) them too many times. The
CodeGen/SystemZ/vec-perm-14.ll test does rely upon sinking a vector
comparison, so I've kept that behaviour by permitting a single sink.

Alternatively, I could introduce a TLI hook to query the target;
would that be the preferred solution?
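
To illustrate the problem, here is a minimal IR sketch (hypothetical
function and value names; a reduced form of the pattern in the AArch64
test added by the first patch, not copied from it). The single vector
icmp has extractelement users in two later blocks, so today
sinkCmpExpression clones the whole compare into each of those blocks:

define void @sink_sketch(<4 x i32> %x, ptr %p) {
entry:
  ; One vector compare; its lanes are tested in the blocks below.
  %cmp = icmp ult <4 x i32> %x, <i32 7, i32 7, i32 7, i32 7>
  %lane0 = extractelement <4 x i1> %cmp, i64 0
  br i1 %lane0, label %if.0, label %cont.0

if.0:
  store i32 1, ptr %p, align 4
  br label %cont.0

cont.0:
  ; Out-of-block user #1: before this change sinkCmpExpression would
  ; clone the icmp into this block.
  %lane1 = extractelement <4 x i1> %cmp, i64 1
  br i1 %lane1, label %if.1, label %cont.1

if.1:
  store i32 1, ptr %p, align 4
  br label %cont.1

cont.1:
  ; Out-of-block user #2: ... and again here, so the vector compare
  ; ends up being materialised three times in total.
  %lane2 = extractelement <4 x i1> %cmp, i64 2
  br i1 %lane2, label %if.2, label %exit

if.2:
  store i32 1, ptr %p, align 4
  br label %exit

exit:
  ret void
}

With this change, a vector compare that has more than one non-PHI user
outside its defining block is left where it is, so only the
single-sink case (which CodeGen/SystemZ/vec-perm-14.ll relies on) is
still rewritten.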
From deba6206a03b9343235c1660303942173b709f68 Mon Sep 17 00:00:00 2001
From: David Sherwood <david.sherwood at arm.com>
Date: Mon, 21 Oct 2024 11:59:24 +0000
Subject: [PATCH 1/2] Add AArch64 vector comparison test
---
.../CodeGen/AArch64/no-sink-vector-cmp.ll | 127 ++++++++++++++++++
1 file changed, 127 insertions(+)
create mode 100644 llvm/test/CodeGen/AArch64/no-sink-vector-cmp.ll
diff --git a/llvm/test/CodeGen/AArch64/no-sink-vector-cmp.ll b/llvm/test/CodeGen/AArch64/no-sink-vector-cmp.ll
new file mode 100644
index 00000000000000..c7e80b1c3dbb6f
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/no-sink-vector-cmp.ll
@@ -0,0 +1,127 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s | FileCheck %s
+
+target triple = "aarch64-unknown-linux-gnu"
+
+define void @vector_loop_with_icmp(ptr nocapture noundef writeonly %dest) {
+; CHECK-LABEL: vector_loop_with_icmp:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #15 // =0xf
+; CHECK-NEXT: mov w10, #4 // =0x4
+; CHECK-NEXT: adrp x9, .LCPI0_0
+; CHECK-NEXT: adrp x11, .LCPI0_1
+; CHECK-NEXT: dup v0.2d, x8
+; CHECK-NEXT: dup v1.2d, x10
+; CHECK-NEXT: ldr q2, [x9, :lo12:.LCPI0_0]
+; CHECK-NEXT: ldr q3, [x11, :lo12:.LCPI0_1]
+; CHECK-NEXT: add x9, x0, #8
+; CHECK-NEXT: mov w10, #16 // =0x10
+; CHECK-NEXT: mov w11, #1 // =0x1
+; CHECK-NEXT: b .LBB0_2
+; CHECK-NEXT: .LBB0_1: // %pred.store.continue18
+; CHECK-NEXT: // in Loop: Header=BB0_2 Depth=1
+; CHECK-NEXT: add v2.2d, v2.2d, v1.2d
+; CHECK-NEXT: add v3.2d, v3.2d, v1.2d
+; CHECK-NEXT: subs x10, x10, #4
+; CHECK-NEXT: add x9, x9, #16
+; CHECK-NEXT: b.eq .LBB0_10
+; CHECK-NEXT: .LBB0_2: // %vector.body
+; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: cmhi v4.2d, v0.2d, v3.2d
+; CHECK-NEXT: xtn v4.2s, v4.2d
+; CHECK-NEXT: uzp1 v4.4h, v4.4h, v0.4h
+; CHECK-NEXT: umov w12, v4.h[0]
+; CHECK-NEXT: tbz w12, #0, .LBB0_4
+; CHECK-NEXT: // %bb.3: // %pred.store.if
+; CHECK-NEXT: // in Loop: Header=BB0_2 Depth=1
+; CHECK-NEXT: stur w11, [x9, #-8]
+; CHECK-NEXT: .LBB0_4: // %pred.store.continue
+; CHECK-NEXT: // in Loop: Header=BB0_2 Depth=1
+; CHECK-NEXT: dup v4.2d, x8
+; CHECK-NEXT: cmhi v4.2d, v4.2d, v3.2d
+; CHECK-NEXT: xtn v4.2s, v4.2d
+; CHECK-NEXT: uzp1 v4.4h, v4.4h, v0.4h
+; CHECK-NEXT: umov w12, v4.h[1]
+; CHECK-NEXT: tbz w12, #0, .LBB0_6
+; CHECK-NEXT: // %bb.5: // %pred.store.if5
+; CHECK-NEXT: // in Loop: Header=BB0_2 Depth=1
+; CHECK-NEXT: stur w11, [x9, #-4]
+; CHECK-NEXT: .LBB0_6: // %pred.store.continue6
+; CHECK-NEXT: // in Loop: Header=BB0_2 Depth=1
+; CHECK-NEXT: dup v4.2d, x8
+; CHECK-NEXT: cmhi v4.2d, v4.2d, v2.2d
+; CHECK-NEXT: xtn v4.2s, v4.2d
+; CHECK-NEXT: uzp1 v4.4h, v0.4h, v4.4h
+; CHECK-NEXT: umov w12, v4.h[2]
+; CHECK-NEXT: tbz w12, #0, .LBB0_8
+; CHECK-NEXT: // %bb.7: // %pred.store.if7
+; CHECK-NEXT: // in Loop: Header=BB0_2 Depth=1
+; CHECK-NEXT: str w11, [x9]
+; CHECK-NEXT: .LBB0_8: // %pred.store.continue8
+; CHECK-NEXT: // in Loop: Header=BB0_2 Depth=1
+; CHECK-NEXT: dup v4.2d, x8
+; CHECK-NEXT: cmhi v4.2d, v4.2d, v2.2d
+; CHECK-NEXT: xtn v4.2s, v4.2d
+; CHECK-NEXT: uzp1 v4.4h, v0.4h, v4.4h
+; CHECK-NEXT: umov w12, v4.h[3]
+; CHECK-NEXT: tbz w12, #0, .LBB0_1
+; CHECK-NEXT: // %bb.9: // %pred.store.if9
+; CHECK-NEXT: // in Loop: Header=BB0_2 Depth=1
+; CHECK-NEXT: str w11, [x9, #4]
+; CHECK-NEXT: b .LBB0_1
+; CHECK-NEXT: .LBB0_10: // %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ br label %vector.body
+
+vector.body:
+ %index = phi i64 [ 0, %entry ], [ %index.next, %pred.store.continue18 ]
+ %vec.ind = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %entry ], [ %vec.ind.next, %pred.store.continue18 ]
+ %0 = icmp ult <4 x i64> %vec.ind, <i64 15, i64 15, i64 15, i64 15>
+ %1 = extractelement <4 x i1> %0, i64 0
+ br i1 %1, label %pred.store.if, label %pred.store.continue
+
+pred.store.if:
+ %2 = getelementptr inbounds i32, ptr %dest, i64 %index
+ store i32 1, ptr %2, align 4
+ br label %pred.store.continue
+
+pred.store.continue:
+ %3 = extractelement <4 x i1> %0, i64 1
+ br i1 %3, label %pred.store.if5, label %pred.store.continue6
+
+pred.store.if5:
+ %4 = or disjoint i64 %index, 1
+ %5 = getelementptr inbounds i32, ptr %dest, i64 %4
+ store i32 1, ptr %5, align 4
+ br label %pred.store.continue6
+
+pred.store.continue6:
+ %6 = extractelement <4 x i1> %0, i64 2
+ br i1 %6, label %pred.store.if7, label %pred.store.continue8
+
+pred.store.if7:
+ %7 = or disjoint i64 %index, 2
+ %8 = getelementptr inbounds i32, ptr %dest, i64 %7
+ store i32 1, ptr %8, align 4
+ br label %pred.store.continue8
+
+pred.store.continue8:
+ %9 = extractelement <4 x i1> %0, i64 3
+ br i1 %9, label %pred.store.if9, label %pred.store.continue18
+
+pred.store.if9:
+ %10 = or disjoint i64 %index, 3
+ %11 = getelementptr inbounds i32, ptr %dest, i64 %10
+ store i32 1, ptr %11, align 4
+ br label %pred.store.continue18
+
+pred.store.continue18:
+ %index.next = add i64 %index, 4
+ %vec.ind.next = add <4 x i64> %vec.ind, <i64 4, i64 4, i64 4, i64 4>
+ %24 = icmp eq i64 %index.next, 16
+ br i1 %24, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup:
+ ret void
+}
From fb59ac6425e1c2f0d45a429d9f5b113a2f44ff2e Mon Sep 17 00:00:00 2001
From: David Sherwood <david.sherwood at arm.com>
Date: Mon, 21 Oct 2024 11:59:58 +0000
Subject: [PATCH 2/2] [CodeGen] Avoid sinking vector comparisons during
CodeGenPrepare
Whilst reviewing PR #109289 and doing some analysis with various
tests involving predicated blocks, I noticed that we're making
codegen and performance worse by sinking vector comparisons into
blocks multiple times. It looks like sinkCmpExpression in
CodeGenPrepare was written for scalar comparisons, where there is
only a single condition register, whereas vector comparisons
typically produce a vector result, for which register pressure is
much less of a concern. Given that vector comparisons are likely to
be more expensive than scalar ones, it makes sense to avoid sinking
(and hence duplicating) them too many times. The
CodeGen/SystemZ/vec-perm-14.ll test does rely upon sinking a vector
comparison, so I've kept that behaviour by permitting a single sink.

Alternatively, I could introduce a TLI hook to query the target;
would that be the preferred solution?
---
llvm/lib/CodeGen/CodeGenPrepare.cpp | 53 +--
.../CodeGen/AArch64/no-sink-vector-cmp.ll | 82 +++--
.../Thumb2/LowOverheadLoops/fast-fp-loops.ll | 154 ++++-----
llvm/test/CodeGen/X86/masked_gather.ll | 319 ++++++++----------
4 files changed, 296 insertions(+), 312 deletions(-)
diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
index 86f28293ba9ff8..80ace20bfc67ab 100644
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -1779,29 +1779,35 @@ static bool sinkCmpExpression(CmpInst *Cmp, const TargetLowering &TLI) {
if (TLI.useSoftFloat() && isa<FCmpInst>(Cmp))
return false;
- // Only insert a cmp in each block once.
- DenseMap<BasicBlock *, CmpInst *> InsertedCmps;
+ // Collect a list of non-PHI users that are in blocks that are different to
+ // the definition block.
+ BasicBlock *DefBB = Cmp->getParent();
+ SmallSet<User *, 4> Users;
+ for (auto *U : Cmp->users()) {
+ Instruction *User = cast<Instruction>(U);
+ if (isa<PHINode>(User))
+ continue;
- bool MadeChange = false;
- for (Value::user_iterator UI = Cmp->user_begin(), E = Cmp->user_end();
- UI != E;) {
- Use &TheUse = UI.getUse();
- Instruction *User = cast<Instruction>(*UI);
+ if (User->getParent() == DefBB)
+ continue;
- // Preincrement use iterator so we don't invalidate it.
- ++UI;
+ Users.insert(User);
+ }
- // Don't bother for PHI nodes.
- if (isa<PHINode>(User))
- continue;
+ // If this is a vector comparison the result will likely not depend upon
+ // setting a condition register, and it's probably too expensive to sink too
+ // many times.
+ VectorType *VecType = dyn_cast<VectorType>(Cmp->getType());
+ if (VecType && VecType->getElementCount().isVector() && Users.size() > 1)
+ return false;
- // Figure out which BB this cmp is used in.
- BasicBlock *UserBB = User->getParent();
- BasicBlock *DefBB = Cmp->getParent();
+ // Only insert a cmp in each block once.
+ DenseMap<BasicBlock *, CmpInst *> InsertedCmps;
- // If this user is in the same block as the cmp, don't change the cmp.
- if (UserBB == DefBB)
- continue;
+ bool MadeChange = false;
+ for (auto *U : Users) {
+ Instruction *UI = cast<Instruction>(U);
+ BasicBlock *UserBB = UI->getParent();
// If we have already inserted a cmp into this block, use it.
CmpInst *&InsertedCmp = InsertedCmps[UserBB];
@@ -1816,10 +1822,15 @@ static bool sinkCmpExpression(CmpInst *Cmp, const TargetLowering &TLI) {
InsertedCmp->setDebugLoc(Cmp->getDebugLoc());
}
- // Replace a use of the cmp with a use of the new cmp.
- TheUse = InsertedCmp;
+ // Replace all uses of the cmp with a use of the new cmp and update the
+ // number of uses.
+ for (unsigned I = 0; I < U->getNumOperands(); I++)
+ if (U->getOperand(I) == Cmp) {
+ U->setOperand(I, InsertedCmp);
+ NumCmpUses++;
+ }
+
MadeChange = true;
- ++NumCmpUses;
}
// If we removed all uses, nuke the cmp.
diff --git a/llvm/test/CodeGen/AArch64/no-sink-vector-cmp.ll b/llvm/test/CodeGen/AArch64/no-sink-vector-cmp.ll
index c7e80b1c3dbb6f..93879d41a25432 100644
--- a/llvm/test/CodeGen/AArch64/no-sink-vector-cmp.ll
+++ b/llvm/test/CodeGen/AArch64/no-sink-vector-cmp.ll
@@ -6,68 +6,64 @@ target triple = "aarch64-unknown-linux-gnu"
define void @vector_loop_with_icmp(ptr nocapture noundef writeonly %dest) {
; CHECK-LABEL: vector_loop_with_icmp:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: mov w8, #15 // =0xf
+; CHECK-NEXT: mov w9, #15 // =0xf
; CHECK-NEXT: mov w10, #4 // =0x4
-; CHECK-NEXT: adrp x9, .LCPI0_0
+; CHECK-NEXT: adrp x8, .LCPI0_0
; CHECK-NEXT: adrp x11, .LCPI0_1
-; CHECK-NEXT: dup v0.2d, x8
+; CHECK-NEXT: dup v0.2d, x9
; CHECK-NEXT: dup v1.2d, x10
-; CHECK-NEXT: ldr q2, [x9, :lo12:.LCPI0_0]
+; CHECK-NEXT: ldr q2, [x8, :lo12:.LCPI0_0]
; CHECK-NEXT: ldr q3, [x11, :lo12:.LCPI0_1]
-; CHECK-NEXT: add x9, x0, #8
-; CHECK-NEXT: mov w10, #16 // =0x10
-; CHECK-NEXT: mov w11, #1 // =0x1
+; CHECK-NEXT: add x8, x0, #8
+; CHECK-NEXT: mov w9, #16 // =0x10
+; CHECK-NEXT: mov w10, #1 // =0x1
; CHECK-NEXT: b .LBB0_2
; CHECK-NEXT: .LBB0_1: // %pred.store.continue18
; CHECK-NEXT: // in Loop: Header=BB0_2 Depth=1
; CHECK-NEXT: add v2.2d, v2.2d, v1.2d
; CHECK-NEXT: add v3.2d, v3.2d, v1.2d
-; CHECK-NEXT: subs x10, x10, #4
-; CHECK-NEXT: add x9, x9, #16
+; CHECK-NEXT: subs x9, x9, #4
+; CHECK-NEXT: add x8, x8, #16
; CHECK-NEXT: b.eq .LBB0_10
; CHECK-NEXT: .LBB0_2: // %vector.body
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: cmhi v4.2d, v0.2d, v3.2d
-; CHECK-NEXT: xtn v4.2s, v4.2d
-; CHECK-NEXT: uzp1 v4.4h, v4.4h, v0.4h
-; CHECK-NEXT: umov w12, v4.h[0]
-; CHECK-NEXT: tbz w12, #0, .LBB0_4
-; CHECK-NEXT: // %bb.3: // %pred.store.if
+; CHECK-NEXT: cmhi v4.2d, v0.2d, v2.2d
+; CHECK-NEXT: cmhi v5.2d, v0.2d, v3.2d
+; CHECK-NEXT: uzp1 v4.4s, v5.4s, v4.4s
+; CHECK-NEXT: xtn v4.4h, v4.4s
+; CHECK-NEXT: umov w11, v4.h[0]
+; CHECK-NEXT: tbnz w11, #0, .LBB0_6
+; CHECK-NEXT: // %bb.3: // %pred.store.continue
; CHECK-NEXT: // in Loop: Header=BB0_2 Depth=1
-; CHECK-NEXT: stur w11, [x9, #-8]
-; CHECK-NEXT: .LBB0_4: // %pred.store.continue
+; CHECK-NEXT: umov w11, v4.h[1]
+; CHECK-NEXT: tbnz w11, #0, .LBB0_7
+; CHECK-NEXT: .LBB0_4: // %pred.store.continue6
; CHECK-NEXT: // in Loop: Header=BB0_2 Depth=1
-; CHECK-NEXT: dup v4.2d, x8
-; CHECK-NEXT: cmhi v4.2d, v4.2d, v3.2d
-; CHECK-NEXT: xtn v4.2s, v4.2d
-; CHECK-NEXT: uzp1 v4.4h, v4.4h, v0.4h
-; CHECK-NEXT: umov w12, v4.h[1]
-; CHECK-NEXT: tbz w12, #0, .LBB0_6
-; CHECK-NEXT: // %bb.5: // %pred.store.if5
+; CHECK-NEXT: umov w11, v4.h[2]
+; CHECK-NEXT: tbnz w11, #0, .LBB0_8
+; CHECK-NEXT: .LBB0_5: // %pred.store.continue8
; CHECK-NEXT: // in Loop: Header=BB0_2 Depth=1
-; CHECK-NEXT: stur w11, [x9, #-4]
-; CHECK-NEXT: .LBB0_6: // %pred.store.continue6
+; CHECK-NEXT: umov w11, v4.h[3]
+; CHECK-NEXT: tbz w11, #0, .LBB0_1
+; CHECK-NEXT: b .LBB0_9
+; CHECK-NEXT: .LBB0_6: // %pred.store.if
; CHECK-NEXT: // in Loop: Header=BB0_2 Depth=1
-; CHECK-NEXT: dup v4.2d, x8
-; CHECK-NEXT: cmhi v4.2d, v4.2d, v2.2d
-; CHECK-NEXT: xtn v4.2s, v4.2d
-; CHECK-NEXT: uzp1 v4.4h, v0.4h, v4.4h
-; CHECK-NEXT: umov w12, v4.h[2]
-; CHECK-NEXT: tbz w12, #0, .LBB0_8
-; CHECK-NEXT: // %bb.7: // %pred.store.if7
+; CHECK-NEXT: stur w10, [x8, #-8]
+; CHECK-NEXT: umov w11, v4.h[1]
+; CHECK-NEXT: tbz w11, #0, .LBB0_4
+; CHECK-NEXT: .LBB0_7: // %pred.store.if5
; CHECK-NEXT: // in Loop: Header=BB0_2 Depth=1
-; CHECK-NEXT: str w11, [x9]
-; CHECK-NEXT: .LBB0_8: // %pred.store.continue8
+; CHECK-NEXT: stur w10, [x8, #-4]
+; CHECK-NEXT: umov w11, v4.h[2]
+; CHECK-NEXT: tbz w11, #0, .LBB0_5
+; CHECK-NEXT: .LBB0_8: // %pred.store.if7
; CHECK-NEXT: // in Loop: Header=BB0_2 Depth=1
-; CHECK-NEXT: dup v4.2d, x8
-; CHECK-NEXT: cmhi v4.2d, v4.2d, v2.2d
-; CHECK-NEXT: xtn v4.2s, v4.2d
-; CHECK-NEXT: uzp1 v4.4h, v0.4h, v4.4h
-; CHECK-NEXT: umov w12, v4.h[3]
-; CHECK-NEXT: tbz w12, #0, .LBB0_1
-; CHECK-NEXT: // %bb.9: // %pred.store.if9
+; CHECK-NEXT: str w10, [x8]
+; CHECK-NEXT: umov w11, v4.h[3]
+; CHECK-NEXT: tbz w11, #0, .LBB0_1
+; CHECK-NEXT: .LBB0_9: // %pred.store.if9
; CHECK-NEXT: // in Loop: Header=BB0_2 Depth=1
-; CHECK-NEXT: str w11, [x9, #4]
+; CHECK-NEXT: str w10, [x8, #4]
; CHECK-NEXT: b .LBB0_1
; CHECK-NEXT: .LBB0_10: // %for.cond.cleanup
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/fast-fp-loops.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/fast-fp-loops.ll
index 2fdf534d526565..b9f1e2d21674f3 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/fast-fp-loops.ll
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/fast-fp-loops.ll
@@ -280,160 +280,156 @@ define arm_aapcs_vfpcc float @fast_float_half_mac(ptr nocapture readonly %b, ptr
; CHECK-NEXT: bxeq lr
; CHECK-NEXT: .LBB2_1: @ %vector.ph
; CHECK-NEXT: push {r4, r5, r7, lr}
-; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13}
+; CHECK-NEXT: vpush {d8, d9}
; CHECK-NEXT: sub sp, #8
; CHECK-NEXT: adds r3, r2, #3
-; CHECK-NEXT: vmov.i32 q5, #0x0
+; CHECK-NEXT: vmov.i32 q3, #0x0
; CHECK-NEXT: bic r3, r3, #3
+; CHECK-NEXT: movs r5, #0
; CHECK-NEXT: sub.w r12, r3, #4
; CHECK-NEXT: movs r3, #1
; CHECK-NEXT: add.w lr, r3, r12, lsr #2
+; CHECK-NEXT: adr r3, .LCPI2_1
; CHECK-NEXT: sub.w r12, r2, #1
-; CHECK-NEXT: adr r2, .LCPI2_1
; CHECK-NEXT: mov lr, lr
-; CHECK-NEXT: vldrw.u32 q0, [r2]
-; CHECK-NEXT: movs r3, #0
+; CHECK-NEXT: vldrw.u32 q0, [r3]
; CHECK-NEXT: vdup.32 q1, r12
-; CHECK-NEXT: vdup.32 q2, r12
; CHECK-NEXT: b .LBB2_3
; CHECK-NEXT: .LBB2_2: @ %else24
; CHECK-NEXT: @ in Loop: Header=BB2_3 Depth=1
-; CHECK-NEXT: vmul.f16 q5, q6, q5
+; CHECK-NEXT: vmul.f16 q3, q4, q3
; CHECK-NEXT: adds r0, #8
-; CHECK-NEXT: vcvtt.f32.f16 s23, s21
-; CHECK-NEXT: vcvtb.f32.f16 s22, s21
-; CHECK-NEXT: vcvtt.f32.f16 s21, s20
-; CHECK-NEXT: vcvtb.f32.f16 s20, s20
+; CHECK-NEXT: vcvtt.f32.f16 s15, s13
+; CHECK-NEXT: vcvtb.f32.f16 s14, s13
+; CHECK-NEXT: vcvtt.f32.f16 s13, s12
+; CHECK-NEXT: vcvtb.f32.f16 s12, s12
; CHECK-NEXT: adds r1, #8
-; CHECK-NEXT: adds r3, #4
-; CHECK-NEXT: vadd.f32 q5, q3, q5
+; CHECK-NEXT: adds r5, #4
+; CHECK-NEXT: vadd.f32 q3, q2, q3
; CHECK-NEXT: subs.w lr, lr, #1
; CHECK-NEXT: bne .LBB2_3
; CHECK-NEXT: b .LBB2_19
; CHECK-NEXT: .LBB2_3: @ %vector.body
; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: vadd.i32 q4, q0, r3
-; CHECK-NEXT: vmov q3, q5
-; CHECK-NEXT: vcmp.u32 cs, q1, q4
-; CHECK-NEXT: @ implicit-def: $q5
-; CHECK-NEXT: vmrs r4, p0
-; CHECK-NEXT: and r2, r4, #1
-; CHECK-NEXT: rsbs r5, r2, #0
-; CHECK-NEXT: movs r2, #0
-; CHECK-NEXT: bfi r2, r5, #0, #1
-; CHECK-NEXT: ubfx r5, r4, #4, #1
-; CHECK-NEXT: rsbs r5, r5, #0
-; CHECK-NEXT: bfi r2, r5, #1, #1
-; CHECK-NEXT: ubfx r5, r4, #8, #1
-; CHECK-NEXT: ubfx r4, r4, #12, #1
-; CHECK-NEXT: rsbs r5, r5, #0
-; CHECK-NEXT: bfi r2, r5, #2, #1
+; CHECK-NEXT: vmov q2, q3
+; CHECK-NEXT: vadd.i32 q3, q0, r5
+; CHECK-NEXT: vcmp.u32 cs, q1, q3
+; CHECK-NEXT: @ implicit-def: $q3
+; CHECK-NEXT: vmrs r2, p0
+; CHECK-NEXT: and r3, r2, #1
+; CHECK-NEXT: rsbs r4, r3, #0
+; CHECK-NEXT: movs r3, #0
+; CHECK-NEXT: bfi r3, r4, #0, #1
+; CHECK-NEXT: ubfx r4, r2, #4, #1
; CHECK-NEXT: rsbs r4, r4, #0
-; CHECK-NEXT: bfi r2, r4, #3, #1
-; CHECK-NEXT: lsls r4, r2, #31
+; CHECK-NEXT: bfi r3, r4, #1, #1
+; CHECK-NEXT: ubfx r4, r2, #8, #1
+; CHECK-NEXT: ubfx r2, r2, #12, #1
+; CHECK-NEXT: rsbs r4, r4, #0
+; CHECK-NEXT: bfi r3, r4, #2, #1
+; CHECK-NEXT: rsbs r2, r2, #0
+; CHECK-NEXT: bfi r3, r2, #3, #1
+; CHECK-NEXT: lsls r2, r3, #31
; CHECK-NEXT: bne .LBB2_12
; CHECK-NEXT: @ %bb.4: @ %else
; CHECK-NEXT: @ in Loop: Header=BB2_3 Depth=1
-; CHECK-NEXT: lsls r4, r2, #30
+; CHECK-NEXT: lsls r2, r3, #30
; CHECK-NEXT: bmi .LBB2_13
; CHECK-NEXT: .LBB2_5: @ %else5
; CHECK-NEXT: @ in Loop: Header=BB2_3 Depth=1
-; CHECK-NEXT: lsls r4, r2, #29
+; CHECK-NEXT: lsls r2, r3, #29
; CHECK-NEXT: bmi .LBB2_14
; CHECK-NEXT: .LBB2_6: @ %else8
; CHECK-NEXT: @ in Loop: Header=BB2_3 Depth=1
-; CHECK-NEXT: lsls r2, r2, #28
+; CHECK-NEXT: lsls r2, r3, #28
; CHECK-NEXT: bpl .LBB2_8
; CHECK-NEXT: .LBB2_7: @ %cond.load10
; CHECK-NEXT: @ in Loop: Header=BB2_3 Depth=1
-; CHECK-NEXT: vldr.16 s22, [r0, #6]
-; CHECK-NEXT: vins.f16 s21, s22
+; CHECK-NEXT: vldr.16 s14, [r0, #6]
+; CHECK-NEXT: vins.f16 s13, s14
; CHECK-NEXT: .LBB2_8: @ %else11
; CHECK-NEXT: @ in Loop: Header=BB2_3 Depth=1
-; CHECK-NEXT: vcmp.u32 cs, q2, q4
-; CHECK-NEXT: @ implicit-def: $q6
-; CHECK-NEXT: vmrs r4, p0
-; CHECK-NEXT: and r2, r4, #1
-; CHECK-NEXT: rsbs r5, r2, #0
-; CHECK-NEXT: movs r2, #0
-; CHECK-NEXT: bfi r2, r5, #0, #1
-; CHECK-NEXT: ubfx r5, r4, #4, #1
-; CHECK-NEXT: rsbs r5, r5, #0
-; CHECK-NEXT: bfi r2, r5, #1, #1
-; CHECK-NEXT: ubfx r5, r4, #8, #1
-; CHECK-NEXT: ubfx r4, r4, #12, #1
-; CHECK-NEXT: rsbs r5, r5, #0
-; CHECK-NEXT: bfi r2, r5, #2, #1
+; CHECK-NEXT: vmrs r2, p0
+; CHECK-NEXT: @ implicit-def: $q4
+; CHECK-NEXT: and r3, r2, #1
+; CHECK-NEXT: rsbs r4, r3, #0
+; CHECK-NEXT: movs r3, #0
+; CHECK-NEXT: bfi r3, r4, #0, #1
+; CHECK-NEXT: ubfx r4, r2, #4, #1
+; CHECK-NEXT: rsbs r4, r4, #0
+; CHECK-NEXT: bfi r3, r4, #1, #1
+; CHECK-NEXT: ubfx r4, r2, #8, #1
+; CHECK-NEXT: ubfx r2, r2, #12, #1
; CHECK-NEXT: rsbs r4, r4, #0
-; CHECK-NEXT: bfi r2, r4, #3, #1
-; CHECK-NEXT: lsls r4, r2, #31
+; CHECK-NEXT: bfi r3, r4, #2, #1
+; CHECK-NEXT: rsbs r2, r2, #0
+; CHECK-NEXT: bfi r3, r2, #3, #1
+; CHECK-NEXT: lsls r2, r3, #31
; CHECK-NEXT: bne .LBB2_15
; CHECK-NEXT: @ %bb.9: @ %else15
; CHECK-NEXT: @ in Loop: Header=BB2_3 Depth=1
-; CHECK-NEXT: lsls r4, r2, #30
+; CHECK-NEXT: lsls r2, r3, #30
; CHECK-NEXT: bmi .LBB2_16
; CHECK-NEXT: .LBB2_10: @ %else18
; CHECK-NEXT: @ in Loop: Header=BB2_3 Depth=1
-; CHECK-NEXT: lsls r4, r2, #29
+; CHECK-NEXT: lsls r2, r3, #29
; CHECK-NEXT: bmi .LBB2_17
; CHECK-NEXT: .LBB2_11: @ %else21
; CHECK-NEXT: @ in Loop: Header=BB2_3 Depth=1
-; CHECK-NEXT: lsls r2, r2, #28
+; CHECK-NEXT: lsls r2, r3, #28
; CHECK-NEXT: bpl .LBB2_2
; CHECK-NEXT: b .LBB2_18
; CHECK-NEXT: .LBB2_12: @ %cond.load
; CHECK-NEXT: @ in Loop: Header=BB2_3 Depth=1
-; CHECK-NEXT: vldr.16 s20, [r0]
-; CHECK-NEXT: lsls r4, r2, #30
+; CHECK-NEXT: vldr.16 s12, [r0]
+; CHECK-NEXT: lsls r2, r3, #30
; CHECK-NEXT: bpl .LBB2_5
; CHECK-NEXT: .LBB2_13: @ %cond.load4
; CHECK-NEXT: @ in Loop: Header=BB2_3 Depth=1
-; CHECK-NEXT: vldr.16 s22, [r0, #2]
-; CHECK-NEXT: vins.f16 s20, s22
-; CHECK-NEXT: lsls r4, r2, #29
+; CHECK-NEXT: vldr.16 s14, [r0, #2]
+; CHECK-NEXT: vins.f16 s12, s14
+; CHECK-NEXT: lsls r2, r3, #29
; CHECK-NEXT: bpl .LBB2_6
; CHECK-NEXT: .LBB2_14: @ %cond.load7
; CHECK-NEXT: @ in Loop: Header=BB2_3 Depth=1
-; CHECK-NEXT: vldr.16 s21, [r0, #4]
-; CHECK-NEXT: vmovx.f16 s22, s0
-; CHECK-NEXT: vins.f16 s21, s22
-; CHECK-NEXT: lsls r2, r2, #28
+; CHECK-NEXT: vldr.16 s13, [r0, #4]
+; CHECK-NEXT: vmovx.f16 s14, s0
+; CHECK-NEXT: vins.f16 s13, s14
+; CHECK-NEXT: lsls r2, r3, #28
; CHECK-NEXT: bmi .LBB2_7
; CHECK-NEXT: b .LBB2_8
; CHECK-NEXT: .LBB2_15: @ %cond.load14
; CHECK-NEXT: @ in Loop: Header=BB2_3 Depth=1
-; CHECK-NEXT: vldr.16 s24, [r1]
-; CHECK-NEXT: lsls r4, r2, #30
+; CHECK-NEXT: vldr.16 s16, [r1]
+; CHECK-NEXT: lsls r2, r3, #30
; CHECK-NEXT: bpl .LBB2_10
; CHECK-NEXT: .LBB2_16: @ %cond.load17
; CHECK-NEXT: @ in Loop: Header=BB2_3 Depth=1
-; CHECK-NEXT: vldr.16 s26, [r1, #2]
-; CHECK-NEXT: vins.f16 s24, s26
-; CHECK-NEXT: lsls r4, r2, #29
+; CHECK-NEXT: vldr.16 s18, [r1, #2]
+; CHECK-NEXT: vins.f16 s16, s18
+; CHECK-NEXT: lsls r2, r3, #29
; CHECK-NEXT: bpl .LBB2_11
; CHECK-NEXT: .LBB2_17: @ %cond.load20
; CHECK-NEXT: @ in Loop: Header=BB2_3 Depth=1
-; CHECK-NEXT: vldr.16 s25, [r1, #4]
-; CHECK-NEXT: vmovx.f16 s26, s0
-; CHECK-NEXT: vins.f16 s25, s26
-; CHECK-NEXT: lsls r2, r2, #28
+; CHECK-NEXT: vldr.16 s17, [r1, #4]
+; CHECK-NEXT: vmovx.f16 s18, s0
+; CHECK-NEXT: vins.f16 s17, s18
+; CHECK-NEXT: lsls r2, r3, #28
; CHECK-NEXT: bpl.w .LBB2_2
; CHECK-NEXT: .LBB2_18: @ %cond.load23
; CHECK-NEXT: @ in Loop: Header=BB2_3 Depth=1
-; CHECK-NEXT: vldr.16 s26, [r1, #6]
-; CHECK-NEXT: vins.f16 s25, s26
+; CHECK-NEXT: vldr.16 s18, [r1, #6]
+; CHECK-NEXT: vins.f16 s17, s18
; CHECK-NEXT: b .LBB2_2
; CHECK-NEXT: .LBB2_19: @ %middle.block
-; CHECK-NEXT: vdup.32 q0, r12
-; CHECK-NEXT: vcmp.u32 cs, q0, q4
-; CHECK-NEXT: vpsel q0, q5, q3
+; CHECK-NEXT: vpsel q0, q3, q2
; CHECK-NEXT: vmov.f32 s4, s2
; CHECK-NEXT: vmov.f32 s5, s3
; CHECK-NEXT: vadd.f32 q0, q0, q1
; CHECK-NEXT: vmov r0, s1
; CHECK-NEXT: vadd.f32 q0, q0, r0
; CHECK-NEXT: add sp, #8
-; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13}
+; CHECK-NEXT: vpop {d8, d9}
; CHECK-NEXT: pop {r4, r5, r7, pc}
; CHECK-NEXT: .p2align 4
; CHECK-NEXT: @ %bb.20:
diff --git a/llvm/test/CodeGen/X86/masked_gather.ll b/llvm/test/CodeGen/X86/masked_gather.ll
index 559a7ec0930b99..c3f46da299c2f8 100644
--- a/llvm/test/CodeGen/X86/masked_gather.ll
+++ b/llvm/test/CodeGen/X86/masked_gather.ll
@@ -1137,12 +1137,12 @@ define <16 x i8> @gather_v16i8_v16i32_v16i8(ptr %base, <16 x i32> %idx, <16 x i8
define <8 x i32> @gather_v8i32_v8i32(<8 x i32> %trigger) {
; SSE-LABEL: gather_v8i32_v8i32:
; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm1, %xmm3
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: pxor %xmm0, %xmm0
; SSE-NEXT: pcmpeqd %xmm0, %xmm1
-; SSE-NEXT: pcmpeqd %xmm2, %xmm0
-; SSE-NEXT: packssdw %xmm1, %xmm0
+; SSE-NEXT: pcmpeqd %xmm0, %xmm2
+; SSE-NEXT: packssdw %xmm1, %xmm2
+; SSE-NEXT: movdqa %xmm2, %xmm0
; SSE-NEXT: packsswb %xmm0, %xmm0
; SSE-NEXT: pmovmskb %xmm0, %eax
; SSE-NEXT: testb $1, %al
@@ -1195,17 +1195,13 @@ define <8 x i32> @gather_v8i32_v8i32(<8 x i32> %trigger) {
; SSE-NEXT: .LBB4_17: # %cond.load19
; SSE-NEXT: pinsrd $3, c+12(%rip), %xmm1
; SSE-NEXT: .LBB4_18: # %else20
-; SSE-NEXT: pxor %xmm4, %xmm4
-; SSE-NEXT: movdqa %xmm2, %xmm5
-; SSE-NEXT: pcmpeqd %xmm4, %xmm5
-; SSE-NEXT: pcmpeqd %xmm3, %xmm4
-; SSE-NEXT: packssdw %xmm4, %xmm5
-; SSE-NEXT: packsswb %xmm5, %xmm5
-; SSE-NEXT: pmovmskb %xmm5, %eax
+; SSE-NEXT: movdqa %xmm2, %xmm3
+; SSE-NEXT: packsswb %xmm3, %xmm3
+; SSE-NEXT: pmovmskb %xmm3, %eax
; SSE-NEXT: testb $1, %al
; SSE-NEXT: je .LBB4_19
; SSE-NEXT: # %bb.20: # %cond.load23
-; SSE-NEXT: movd {{.*#+}} xmm4 = mem[0],zero,zero,zero
+; SSE-NEXT: movd {{.*#+}} xmm3 = mem[0],zero,zero,zero
; SSE-NEXT: testb $2, %al
; SSE-NEXT: jne .LBB4_22
; SSE-NEXT: jmp .LBB4_23
@@ -1215,11 +1211,11 @@ define <8 x i32> @gather_v8i32_v8i32(<8 x i32> %trigger) {
; SSE-NEXT: jne .LBB4_17
; SSE-NEXT: jmp .LBB4_18
; SSE-NEXT: .LBB4_19:
-; SSE-NEXT: # implicit-def: $xmm4
+; SSE-NEXT: # implicit-def: $xmm3
; SSE-NEXT: testb $2, %al
; SSE-NEXT: je .LBB4_23
; SSE-NEXT: .LBB4_22: # %cond.load28
-; SSE-NEXT: pinsrd $1, c+28(%rip), %xmm4
+; SSE-NEXT: pinsrd $1, c+28(%rip), %xmm3
; SSE-NEXT: .LBB4_23: # %else31
; SSE-NEXT: testb $4, %al
; SSE-NEXT: jne .LBB4_24
@@ -1230,24 +1226,24 @@ define <8 x i32> @gather_v8i32_v8i32(<8 x i32> %trigger) {
; SSE-NEXT: testb $16, %al
; SSE-NEXT: je .LBB4_28
; SSE-NEXT: .LBB4_29: # %cond.load43
-; SSE-NEXT: pinsrd $0, c+28(%rip), %xmm5
+; SSE-NEXT: pinsrd $0, c+28(%rip), %xmm4
; SSE-NEXT: testb $32, %al
; SSE-NEXT: jne .LBB4_31
; SSE-NEXT: jmp .LBB4_32
; SSE-NEXT: .LBB4_24: # %cond.load33
-; SSE-NEXT: pinsrd $2, c+28(%rip), %xmm4
+; SSE-NEXT: pinsrd $2, c+28(%rip), %xmm3
; SSE-NEXT: testb $8, %al
; SSE-NEXT: je .LBB4_27
; SSE-NEXT: .LBB4_26: # %cond.load38
-; SSE-NEXT: pinsrd $3, c+28(%rip), %xmm4
+; SSE-NEXT: pinsrd $3, c+28(%rip), %xmm3
; SSE-NEXT: testb $16, %al
; SSE-NEXT: jne .LBB4_29
; SSE-NEXT: .LBB4_28:
-; SSE-NEXT: # implicit-def: $xmm5
+; SSE-NEXT: # implicit-def: $xmm4
; SSE-NEXT: testb $32, %al
; SSE-NEXT: je .LBB4_32
; SSE-NEXT: .LBB4_31: # %cond.load48
-; SSE-NEXT: pinsrd $1, c+28(%rip), %xmm5
+; SSE-NEXT: pinsrd $1, c+28(%rip), %xmm4
; SSE-NEXT: .LBB4_32: # %else51
; SSE-NEXT: testb $64, %al
; SSE-NEXT: jne .LBB4_33
@@ -1255,12 +1251,8 @@ define <8 x i32> @gather_v8i32_v8i32(<8 x i32> %trigger) {
; SSE-NEXT: testb $-128, %al
; SSE-NEXT: je .LBB4_36
; SSE-NEXT: .LBB4_35: # %cond.load58
-; SSE-NEXT: pinsrd $3, c+28(%rip), %xmm5
+; SSE-NEXT: pinsrd $3, c+28(%rip), %xmm4
; SSE-NEXT: .LBB4_36: # %else61
-; SSE-NEXT: pxor %xmm6, %xmm6
-; SSE-NEXT: pcmpeqd %xmm6, %xmm2
-; SSE-NEXT: pcmpeqd %xmm6, %xmm3
-; SSE-NEXT: packssdw %xmm3, %xmm2
; SSE-NEXT: packsswb %xmm2, %xmm2
; SSE-NEXT: pmovmskb %xmm2, %eax
; SSE-NEXT: testb $1, %al
@@ -1271,7 +1263,7 @@ define <8 x i32> @gather_v8i32_v8i32(<8 x i32> %trigger) {
; SSE-NEXT: jne .LBB4_40
; SSE-NEXT: jmp .LBB4_41
; SSE-NEXT: .LBB4_33: # %cond.load53
-; SSE-NEXT: pinsrd $2, c+28(%rip), %xmm5
+; SSE-NEXT: pinsrd $2, c+28(%rip), %xmm4
; SSE-NEXT: testb $-128, %al
; SSE-NEXT: jne .LBB4_35
; SSE-NEXT: jmp .LBB4_36
@@ -1291,7 +1283,7 @@ define <8 x i32> @gather_v8i32_v8i32(<8 x i32> %trigger) {
; SSE-NEXT: testb $16, %al
; SSE-NEXT: je .LBB4_46
; SSE-NEXT: .LBB4_47: # %cond.load84
-; SSE-NEXT: pinsrd $0, c+28(%rip), %xmm3
+; SSE-NEXT: pinsrd $0, c+28(%rip), %xmm5
; SSE-NEXT: testb $32, %al
; SSE-NEXT: jne .LBB4_49
; SSE-NEXT: jmp .LBB4_50
@@ -1304,38 +1296,38 @@ define <8 x i32> @gather_v8i32_v8i32(<8 x i32> %trigger) {
; SSE-NEXT: testb $16, %al
; SSE-NEXT: jne .LBB4_47
; SSE-NEXT: .LBB4_46:
-; SSE-NEXT: # implicit-def: $xmm3
+; SSE-NEXT: # implicit-def: $xmm5
; SSE-NEXT: testb $32, %al
; SSE-NEXT: je .LBB4_50
; SSE-NEXT: .LBB4_49: # %cond.load89
-; SSE-NEXT: pinsrd $1, c+28(%rip), %xmm3
+; SSE-NEXT: pinsrd $1, c+28(%rip), %xmm5
; SSE-NEXT: .LBB4_50: # %else92
; SSE-NEXT: testb $64, %al
; SSE-NEXT: je .LBB4_52
; SSE-NEXT: # %bb.51: # %cond.load94
-; SSE-NEXT: pinsrd $2, c+28(%rip), %xmm3
+; SSE-NEXT: pinsrd $2, c+28(%rip), %xmm5
; SSE-NEXT: .LBB4_52: # %else97
-; SSE-NEXT: paddd %xmm4, %xmm0
-; SSE-NEXT: paddd %xmm5, %xmm1
+; SSE-NEXT: paddd %xmm3, %xmm0
+; SSE-NEXT: paddd %xmm4, %xmm1
; SSE-NEXT: testb $-128, %al
; SSE-NEXT: je .LBB4_54
; SSE-NEXT: # %bb.53: # %cond.load99
-; SSE-NEXT: pinsrd $3, c+28(%rip), %xmm3
+; SSE-NEXT: pinsrd $3, c+28(%rip), %xmm5
; SSE-NEXT: .LBB4_54: # %else102
-; SSE-NEXT: paddd %xmm3, %xmm1
+; SSE-NEXT: paddd %xmm5, %xmm1
; SSE-NEXT: paddd %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: gather_v8i32_v8i32:
; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX1-NEXT: vpcmpeqd %xmm1, %xmm2, %xmm3
-; AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
-; AVX1-NEXT: vmovmskps %ymm1, %eax
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqd %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpeqd %xmm2, %xmm0, %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm0
+; AVX1-NEXT: vmovmskps %ymm0, %eax
; AVX1-NEXT: testb $1, %al
-; AVX1-NEXT: # implicit-def: $ymm1
+; AVX1-NEXT: # implicit-def: $ymm0
; AVX1-NEXT: jne .LBB4_1
; AVX1-NEXT: # %bb.2: # %else
; AVX1-NEXT: testb $2, %al
@@ -1354,21 +1346,22 @@ define <8 x i32> @gather_v8i32_v8i32(<8 x i32> %trigger) {
; AVX1-NEXT: jne .LBB4_11
; AVX1-NEXT: .LBB4_12: # %else14
; AVX1-NEXT: testb $64, %al
-; AVX1-NEXT: jne .LBB4_13
+; AVX1-NEXT: je .LBB4_14
+; AVX1-NEXT: .LBB4_13: # %cond.load16
+; AVX1-NEXT: vbroadcastss c+12(%rip), %ymm3
+; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm3[6],ymm0[7]
; AVX1-NEXT: .LBB4_14: # %else17
+; AVX1-NEXT: vpackssdw %xmm1, %xmm2, %xmm2
; AVX1-NEXT: testb $-128, %al
; AVX1-NEXT: je .LBB4_16
-; AVX1-NEXT: .LBB4_15: # %cond.load19
-; AVX1-NEXT: vbroadcastss c+12(%rip), %ymm3
-; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm3[7]
+; AVX1-NEXT: # %bb.15: # %cond.load19
+; AVX1-NEXT: vbroadcastss c+12(%rip), %ymm1
+; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm1[7]
; AVX1-NEXT: .LBB4_16: # %else20
-; AVX1-NEXT: vxorps %xmm3, %xmm3, %xmm3
-; AVX1-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm4
-; AVX1-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm3
-; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
-; AVX1-NEXT: vmovmskps %ymm3, %eax
+; AVX1-NEXT: vpacksswb %xmm2, %xmm2, %xmm1
+; AVX1-NEXT: vpmovmskb %xmm1, %eax
; AVX1-NEXT: testb $1, %al
-; AVX1-NEXT: # implicit-def: $ymm3
+; AVX1-NEXT: # implicit-def: $ymm1
; AVX1-NEXT: jne .LBB4_17
; AVX1-NEXT: # %bb.18: # %else26
; AVX1-NEXT: testb $2, %al
@@ -1392,16 +1385,13 @@ define <8 x i32> @gather_v8i32_v8i32(<8 x i32> %trigger) {
; AVX1-NEXT: testb $-128, %al
; AVX1-NEXT: je .LBB4_32
; AVX1-NEXT: .LBB4_31: # %cond.load58
-; AVX1-NEXT: vbroadcastss c+28(%rip), %ymm4
-; AVX1-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5,6],ymm4[7]
+; AVX1-NEXT: vbroadcastss c+28(%rip), %ymm3
+; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm3[7]
; AVX1-NEXT: .LBB4_32: # %else61
-; AVX1-NEXT: vxorps %xmm4, %xmm4, %xmm4
-; AVX1-NEXT: vpcmpeqd %xmm4, %xmm2, %xmm2
-; AVX1-NEXT: vpcmpeqd %xmm4, %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX1-NEXT: vmovmskps %ymm0, %eax
+; AVX1-NEXT: vpacksswb %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpmovmskb %xmm2, %eax
; AVX1-NEXT: testb $1, %al
-; AVX1-NEXT: # implicit-def: $ymm0
+; AVX1-NEXT: # implicit-def: $ymm2
; AVX1-NEXT: jne .LBB4_33
; AVX1-NEXT: # %bb.34: # %else67
; AVX1-NEXT: testb $2, %al
@@ -1416,125 +1406,120 @@ define <8 x i32> @gather_v8i32_v8i32(<8 x i32> %trigger) {
; AVX1-NEXT: testb $16, %al
; AVX1-NEXT: je .LBB4_42
; AVX1-NEXT: .LBB4_41: # %cond.load84
-; AVX1-NEXT: vbroadcastss c+28(%rip), %ymm2
-; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4],ymm0[5,6,7]
+; AVX1-NEXT: vbroadcastss c+28(%rip), %ymm3
+; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4],ymm2[5,6,7]
; AVX1-NEXT: .LBB4_42: # %else87
-; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm2
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
-; AVX1-NEXT: vpaddd %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX1-NEXT: testb $32, %al
; AVX1-NEXT: je .LBB4_44
; AVX1-NEXT: # %bb.43: # %cond.load89
-; AVX1-NEXT: vbroadcastss c+28(%rip), %ymm3
-; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm3[5],ymm0[6,7]
+; AVX1-NEXT: vbroadcastss c+28(%rip), %ymm1
+; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm1[5],ymm2[6,7]
; AVX1-NEXT: .LBB4_44: # %else92
-; AVX1-NEXT: vpaddd %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vpaddd %xmm3, %xmm4, %xmm1
; AVX1-NEXT: testb $64, %al
; AVX1-NEXT: je .LBB4_46
; AVX1-NEXT: # %bb.45: # %cond.load94
; AVX1-NEXT: vbroadcastss c+28(%rip), %ymm3
-; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm3[6],ymm0[7]
+; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm3[6],ymm2[7]
; AVX1-NEXT: .LBB4_46: # %else97
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: testb $-128, %al
; AVX1-NEXT: je .LBB4_48
; AVX1-NEXT: # %bb.47: # %cond.load99
-; AVX1-NEXT: vbroadcastss c+28(%rip), %ymm2
-; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm2[7]
+; AVX1-NEXT: vbroadcastss c+28(%rip), %ymm1
+; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm1[7]
; AVX1-NEXT: .LBB4_48: # %else102
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vpaddd %xmm2, %xmm3, %xmm2
-; AVX1-NEXT: vpaddd %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpaddd %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpaddd %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
; AVX1-NEXT: .LBB4_1: # %cond.load
-; AVX1-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX1-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX1-NEXT: testb $2, %al
; AVX1-NEXT: je .LBB4_4
; AVX1-NEXT: .LBB4_3: # %cond.load1
-; AVX1-NEXT: vpinsrd $1, c+12(%rip), %xmm1, %xmm3
-; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
+; AVX1-NEXT: vpinsrd $1, c+12(%rip), %xmm0, %xmm3
+; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
; AVX1-NEXT: testb $4, %al
; AVX1-NEXT: je .LBB4_6
; AVX1-NEXT: .LBB4_5: # %cond.load4
-; AVX1-NEXT: vpinsrd $2, c+12(%rip), %xmm1, %xmm3
-; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
+; AVX1-NEXT: vpinsrd $2, c+12(%rip), %xmm0, %xmm3
+; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
; AVX1-NEXT: testb $8, %al
; AVX1-NEXT: je .LBB4_8
; AVX1-NEXT: .LBB4_7: # %cond.load7
-; AVX1-NEXT: vpinsrd $3, c+12(%rip), %xmm1, %xmm3
-; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
+; AVX1-NEXT: vpinsrd $3, c+12(%rip), %xmm0, %xmm3
+; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
; AVX1-NEXT: testb $16, %al
; AVX1-NEXT: je .LBB4_10
; AVX1-NEXT: .LBB4_9: # %cond.load10
; AVX1-NEXT: vbroadcastss c+12(%rip), %ymm3
-; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4],ymm1[5,6,7]
+; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4],ymm0[5,6,7]
; AVX1-NEXT: testb $32, %al
; AVX1-NEXT: je .LBB4_12
; AVX1-NEXT: .LBB4_11: # %cond.load13
; AVX1-NEXT: vbroadcastss c+12(%rip), %ymm3
-; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm3[5],ymm1[6,7]
+; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm3[5],ymm0[6,7]
; AVX1-NEXT: testb $64, %al
-; AVX1-NEXT: je .LBB4_14
-; AVX1-NEXT: .LBB4_13: # %cond.load16
-; AVX1-NEXT: vbroadcastss c+12(%rip), %ymm3
-; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm3[6],ymm1[7]
-; AVX1-NEXT: testb $-128, %al
-; AVX1-NEXT: jne .LBB4_15
-; AVX1-NEXT: jmp .LBB4_16
+; AVX1-NEXT: jne .LBB4_13
+; AVX1-NEXT: jmp .LBB4_14
; AVX1-NEXT: .LBB4_17: # %cond.load23
-; AVX1-NEXT: vmovd {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; AVX1-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX1-NEXT: testb $2, %al
; AVX1-NEXT: je .LBB4_20
; AVX1-NEXT: .LBB4_19: # %cond.load28
-; AVX1-NEXT: vpinsrd $1, c+28(%rip), %xmm3, %xmm4
-; AVX1-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX1-NEXT: vpinsrd $1, c+28(%rip), %xmm1, %xmm3
+; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
; AVX1-NEXT: testb $4, %al
; AVX1-NEXT: je .LBB4_22
; AVX1-NEXT: .LBB4_21: # %cond.load33
-; AVX1-NEXT: vpinsrd $2, c+28(%rip), %xmm3, %xmm4
-; AVX1-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX1-NEXT: vpinsrd $2, c+28(%rip), %xmm1, %xmm3
+; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
; AVX1-NEXT: testb $8, %al
; AVX1-NEXT: je .LBB4_24
; AVX1-NEXT: .LBB4_23: # %cond.load38
-; AVX1-NEXT: vpinsrd $3, c+28(%rip), %xmm3, %xmm4
-; AVX1-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX1-NEXT: vpinsrd $3, c+28(%rip), %xmm1, %xmm3
+; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
; AVX1-NEXT: testb $16, %al
; AVX1-NEXT: je .LBB4_26
; AVX1-NEXT: .LBB4_25: # %cond.load43
-; AVX1-NEXT: vbroadcastss c+28(%rip), %ymm4
-; AVX1-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm4[4],ymm3[5,6,7]
+; AVX1-NEXT: vbroadcastss c+28(%rip), %ymm3
+; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4],ymm1[5,6,7]
; AVX1-NEXT: testb $32, %al
; AVX1-NEXT: je .LBB4_28
; AVX1-NEXT: .LBB4_27: # %cond.load48
-; AVX1-NEXT: vbroadcastss c+28(%rip), %ymm4
-; AVX1-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4],ymm4[5],ymm3[6,7]
+; AVX1-NEXT: vbroadcastss c+28(%rip), %ymm3
+; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm3[5],ymm1[6,7]
; AVX1-NEXT: testb $64, %al
; AVX1-NEXT: je .LBB4_30
; AVX1-NEXT: .LBB4_29: # %cond.load53
-; AVX1-NEXT: vbroadcastss c+28(%rip), %ymm4
-; AVX1-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm4[6],ymm3[7]
+; AVX1-NEXT: vbroadcastss c+28(%rip), %ymm3
+; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm3[6],ymm1[7]
; AVX1-NEXT: testb $-128, %al
; AVX1-NEXT: jne .LBB4_31
; AVX1-NEXT: jmp .LBB4_32
; AVX1-NEXT: .LBB4_33: # %cond.load64
-; AVX1-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX1-NEXT: vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero
; AVX1-NEXT: testb $2, %al
; AVX1-NEXT: je .LBB4_36
; AVX1-NEXT: .LBB4_35: # %cond.load69
-; AVX1-NEXT: vpinsrd $1, c+28(%rip), %xmm0, %xmm2
-; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-NEXT: vpinsrd $1, c+28(%rip), %xmm2, %xmm3
+; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
; AVX1-NEXT: testb $4, %al
; AVX1-NEXT: je .LBB4_38
; AVX1-NEXT: .LBB4_37: # %cond.load74
-; AVX1-NEXT: vpinsrd $2, c+28(%rip), %xmm0, %xmm2
-; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-NEXT: vpinsrd $2, c+28(%rip), %xmm2, %xmm3
+; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
; AVX1-NEXT: testb $8, %al
; AVX1-NEXT: je .LBB4_40
; AVX1-NEXT: .LBB4_39: # %cond.load79
-; AVX1-NEXT: vpinsrd $3, c+28(%rip), %xmm0, %xmm2
-; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-NEXT: vpinsrd $3, c+28(%rip), %xmm2, %xmm3
+; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
; AVX1-NEXT: testb $16, %al
; AVX1-NEXT: jne .LBB4_41
; AVX1-NEXT: jmp .LBB4_42
@@ -1545,7 +1530,7 @@ define <8 x i32> @gather_v8i32_v8i32(<8 x i32> %trigger) {
; AVX2-NEXT: vpcmpeqd %ymm1, %ymm0, %ymm1
; AVX2-NEXT: vmovmskps %ymm1, %eax
; AVX2-NEXT: testb $1, %al
-; AVX2-NEXT: # implicit-def: $ymm1
+; AVX2-NEXT: # implicit-def: $ymm0
; AVX2-NEXT: jne .LBB4_1
; AVX2-NEXT: # %bb.2: # %else
; AVX2-NEXT: testb $2, %al
@@ -1561,22 +1546,29 @@ define <8 x i32> @gather_v8i32_v8i32(<8 x i32> %trigger) {
; AVX2-NEXT: jne .LBB4_9
; AVX2-NEXT: .LBB4_10: # %else11
; AVX2-NEXT: testb $32, %al
-; AVX2-NEXT: jne .LBB4_11
+; AVX2-NEXT: je .LBB4_12
+; AVX2-NEXT: .LBB4_11: # %cond.load13
+; AVX2-NEXT: vpbroadcastd c+12(%rip), %ymm2
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5],ymm0[6,7]
; AVX2-NEXT: .LBB4_12: # %else14
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX2-NEXT: testb $64, %al
-; AVX2-NEXT: jne .LBB4_13
+; AVX2-NEXT: je .LBB4_14
+; AVX2-NEXT: # %bb.13: # %cond.load16
+; AVX2-NEXT: vpbroadcastd c+12(%rip), %ymm3
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm3[6],ymm0[7]
; AVX2-NEXT: .LBB4_14: # %else17
+; AVX2-NEXT: vpackssdw %xmm2, %xmm1, %xmm2
; AVX2-NEXT: testb $-128, %al
; AVX2-NEXT: je .LBB4_16
-; AVX2-NEXT: .LBB4_15: # %cond.load19
-; AVX2-NEXT: vpbroadcastd c+12(%rip), %ymm2
-; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm2[7]
+; AVX2-NEXT: # %bb.15: # %cond.load19
+; AVX2-NEXT: vpbroadcastd c+12(%rip), %ymm1
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm1[7]
; AVX2-NEXT: .LBB4_16: # %else20
-; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX2-NEXT: vpcmpeqd %ymm2, %ymm0, %ymm2
-; AVX2-NEXT: vmovmskps %ymm2, %eax
+; AVX2-NEXT: vpacksswb %xmm2, %xmm2, %xmm1
+; AVX2-NEXT: vpmovmskb %xmm1, %eax
; AVX2-NEXT: testb $1, %al
-; AVX2-NEXT: # implicit-def: $ymm2
+; AVX2-NEXT: # implicit-def: $ymm1
; AVX2-NEXT: jne .LBB4_17
; AVX2-NEXT: # %bb.18: # %else26
; AVX2-NEXT: testb $2, %al
@@ -1601,13 +1593,12 @@ define <8 x i32> @gather_v8i32_v8i32(<8 x i32> %trigger) {
; AVX2-NEXT: je .LBB4_32
; AVX2-NEXT: .LBB4_31: # %cond.load58
; AVX2-NEXT: vpbroadcastd c+28(%rip), %ymm3
-; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm3[7]
+; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm3[7]
; AVX2-NEXT: .LBB4_32: # %else61
-; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; AVX2-NEXT: vpcmpeqd %ymm3, %ymm0, %ymm0
-; AVX2-NEXT: vmovmskps %ymm0, %eax
+; AVX2-NEXT: vpacksswb %xmm2, %xmm2, %xmm2
+; AVX2-NEXT: vpmovmskb %xmm2, %eax
; AVX2-NEXT: testb $1, %al
-; AVX2-NEXT: # implicit-def: $ymm0
+; AVX2-NEXT: # implicit-def: $ymm2
; AVX2-NEXT: jne .LBB4_33
; AVX2-NEXT: # %bb.34: # %else67
; AVX2-NEXT: testb $2, %al
@@ -1629,114 +1620,104 @@ define <8 x i32> @gather_v8i32_v8i32(<8 x i32> %trigger) {
; AVX2-NEXT: je .LBB4_46
; AVX2-NEXT: .LBB4_45: # %cond.load94
; AVX2-NEXT: vpbroadcastd c+28(%rip), %ymm3
-; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm3[6],ymm0[7]
+; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm3[6],ymm2[7]
; AVX2-NEXT: .LBB4_46: # %else97
-; AVX2-NEXT: vpaddd %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: testb $-128, %al
; AVX2-NEXT: je .LBB4_48
; AVX2-NEXT: # %bb.47: # %cond.load99
-; AVX2-NEXT: vpbroadcastd c+28(%rip), %ymm2
-; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm2[7]
+; AVX2-NEXT: vpbroadcastd c+28(%rip), %ymm1
+; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm1[7]
; AVX2-NEXT: .LBB4_48: # %else102
-; AVX2-NEXT: vpaddd %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpaddd %ymm2, %ymm0, %ymm0
; AVX2-NEXT: retq
; AVX2-NEXT: .LBB4_1: # %cond.load
-; AVX2-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX2-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX2-NEXT: testb $2, %al
; AVX2-NEXT: je .LBB4_4
; AVX2-NEXT: .LBB4_3: # %cond.load1
-; AVX2-NEXT: vpinsrd $1, c+12(%rip), %xmm1, %xmm2
-; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
+; AVX2-NEXT: vpinsrd $1, c+12(%rip), %xmm0, %xmm2
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
; AVX2-NEXT: testb $4, %al
; AVX2-NEXT: je .LBB4_6
; AVX2-NEXT: .LBB4_5: # %cond.load4
-; AVX2-NEXT: vpinsrd $2, c+12(%rip), %xmm1, %xmm2
-; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
+; AVX2-NEXT: vpinsrd $2, c+12(%rip), %xmm0, %xmm2
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
; AVX2-NEXT: testb $8, %al
; AVX2-NEXT: je .LBB4_8
; AVX2-NEXT: .LBB4_7: # %cond.load7
-; AVX2-NEXT: vpinsrd $3, c+12(%rip), %xmm1, %xmm2
-; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
+; AVX2-NEXT: vpinsrd $3, c+12(%rip), %xmm0, %xmm2
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
; AVX2-NEXT: testb $16, %al
; AVX2-NEXT: je .LBB4_10
; AVX2-NEXT: .LBB4_9: # %cond.load10
; AVX2-NEXT: vpbroadcastd c+12(%rip), %ymm2
-; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4],ymm1[5,6,7]
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4],ymm0[5,6,7]
; AVX2-NEXT: testb $32, %al
-; AVX2-NEXT: je .LBB4_12
-; AVX2-NEXT: .LBB4_11: # %cond.load13
-; AVX2-NEXT: vpbroadcastd c+12(%rip), %ymm2
-; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5],ymm1[6,7]
-; AVX2-NEXT: testb $64, %al
-; AVX2-NEXT: je .LBB4_14
-; AVX2-NEXT: .LBB4_13: # %cond.load16
-; AVX2-NEXT: vpbroadcastd c+12(%rip), %ymm2
-; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6],ymm1[7]
-; AVX2-NEXT: testb $-128, %al
-; AVX2-NEXT: jne .LBB4_15
-; AVX2-NEXT: jmp .LBB4_16
+; AVX2-NEXT: jne .LBB4_11
+; AVX2-NEXT: jmp .LBB4_12
; AVX2-NEXT: .LBB4_17: # %cond.load23
-; AVX2-NEXT: vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; AVX2-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX2-NEXT: testb $2, %al
; AVX2-NEXT: je .LBB4_20
; AVX2-NEXT: .LBB4_19: # %cond.load28
-; AVX2-NEXT: vpinsrd $1, c+28(%rip), %xmm2, %xmm3
-; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
+; AVX2-NEXT: vpinsrd $1, c+28(%rip), %xmm1, %xmm3
+; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT: testb $4, %al
; AVX2-NEXT: je .LBB4_22
; AVX2-NEXT: .LBB4_21: # %cond.load33
-; AVX2-NEXT: vpinsrd $2, c+28(%rip), %xmm2, %xmm3
-; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
+; AVX2-NEXT: vpinsrd $2, c+28(%rip), %xmm1, %xmm3
+; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT: testb $8, %al
; AVX2-NEXT: je .LBB4_24
; AVX2-NEXT: .LBB4_23: # %cond.load38
-; AVX2-NEXT: vpinsrd $3, c+28(%rip), %xmm2, %xmm3
-; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
+; AVX2-NEXT: vpinsrd $3, c+28(%rip), %xmm1, %xmm3
+; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT: testb $16, %al
; AVX2-NEXT: je .LBB4_26
; AVX2-NEXT: .LBB4_25: # %cond.load43
; AVX2-NEXT: vpbroadcastd c+28(%rip), %ymm3
-; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4],ymm2[5,6,7]
+; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4],ymm1[5,6,7]
; AVX2-NEXT: testb $32, %al
; AVX2-NEXT: je .LBB4_28
; AVX2-NEXT: .LBB4_27: # %cond.load48
; AVX2-NEXT: vpbroadcastd c+28(%rip), %ymm3
-; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5],ymm2[6,7]
+; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm3[5],ymm1[6,7]
; AVX2-NEXT: testb $64, %al
; AVX2-NEXT: je .LBB4_30
; AVX2-NEXT: .LBB4_29: # %cond.load53
; AVX2-NEXT: vpbroadcastd c+28(%rip), %ymm3
-; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm3[6],ymm2[7]
+; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm3[6],ymm1[7]
; AVX2-NEXT: testb $-128, %al
; AVX2-NEXT: jne .LBB4_31
; AVX2-NEXT: jmp .LBB4_32
; AVX2-NEXT: .LBB4_33: # %cond.load64
-; AVX2-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX2-NEXT: vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero
; AVX2-NEXT: testb $2, %al
; AVX2-NEXT: je .LBB4_36
; AVX2-NEXT: .LBB4_35: # %cond.load69
-; AVX2-NEXT: vpinsrd $1, c+28(%rip), %xmm0, %xmm3
-; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-NEXT: vpinsrd $1, c+28(%rip), %xmm2, %xmm3
+; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
; AVX2-NEXT: testb $4, %al
; AVX2-NEXT: je .LBB4_38
; AVX2-NEXT: .LBB4_37: # %cond.load74
-; AVX2-NEXT: vpinsrd $2, c+28(%rip), %xmm0, %xmm3
-; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-NEXT: vpinsrd $2, c+28(%rip), %xmm2, %xmm3
+; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
; AVX2-NEXT: testb $8, %al
; AVX2-NEXT: je .LBB4_40
; AVX2-NEXT: .LBB4_39: # %cond.load79
-; AVX2-NEXT: vpinsrd $3, c+28(%rip), %xmm0, %xmm3
-; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-NEXT: vpinsrd $3, c+28(%rip), %xmm2, %xmm3
+; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
; AVX2-NEXT: testb $16, %al
; AVX2-NEXT: je .LBB4_42
; AVX2-NEXT: .LBB4_41: # %cond.load84
; AVX2-NEXT: vpbroadcastd c+28(%rip), %ymm3
-; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4],ymm0[5,6,7]
+; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4],ymm2[5,6,7]
; AVX2-NEXT: testb $32, %al
; AVX2-NEXT: je .LBB4_44
; AVX2-NEXT: .LBB4_43: # %cond.load89
; AVX2-NEXT: vpbroadcastd c+28(%rip), %ymm3
-; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm3[5],ymm0[6,7]
+; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5],ymm2[6,7]
; AVX2-NEXT: testb $64, %al
; AVX2-NEXT: jne .LBB4_45
; AVX2-NEXT: jmp .LBB4_46