<div dir="ltr">Hello Pengfei,<br><br>This commit broke test to the builder:<br><a href="http://lab.llvm.org:8011/builders/llvm-clang-x86_64-expensive-checks-win/builds/21139">http://lab.llvm.org:8011/builders/llvm-clang-x86_64-expensive-checks-win/builds/21139</a><br><br>. . . <br>Failing Tests (1):<br><div> LLVM :: CodeGen/X86/masked_gather.ll</div><div><br></div><div></div>Please have a look ASAP?<br><br>Thanks<br><br>Galina</div><br><div class="gmail_quote"><div dir="ltr" class="gmail_attr">On Tue, Dec 17, 2019 at 8:25 PM via llvm-commits <<a href="mailto:llvm-commits@lists.llvm.org">llvm-commits@lists.llvm.org</a>> wrote:<br></div><blockquote class="gmail_quote" style="margin:0px 0px 0px 0.8ex;border-left:1px solid rgb(204,204,204);padding-left:1ex"><br>
Author: Wang, Pengfei<br>
Date: 2019-12-18T12:24:58+08:00<br>
New Revision: 8cc0b586738157728a93af145c8f8dec1bf59ee1<br>
<br>
URL: <a href="https://github.com/llvm/llvm-project/commit/8cc0b586738157728a93af145c8f8dec1bf59ee1" rel="noreferrer" target="_blank">https://github.com/llvm/llvm-project/commit/8cc0b586738157728a93af145c8f8dec1bf59ee1</a><br>
DIFF: <a href="https://github.com/llvm/llvm-project/commit/8cc0b586738157728a93af145c8f8dec1bf59ee1.diff" rel="noreferrer" target="_blank">https://github.com/llvm/llvm-project/commit/8cc0b586738157728a93af145c8f8dec1bf59ee1.diff</a><br>
<br>
LOG: [X86] Add offset calculation for struct elements when getting the uniform base for the Gather/Scatter intrinsics.<br>
<br>
Summary: Add offset calculation for struct elements when getting the<br>
uniform base for the Gather/Scatter intrinsics.<br>
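
For illustration, here is a minimal sketch (in LLVM IR, with a made-up struct, global, and function that are not taken from the patch) of the kind of gather this change targets: when the final GEP index selects a struct field, the byte offset is now taken from the struct's StructLayout with Scale = 1, instead of scaling the index by the GEP's result element size.

  %struct.pair = type { i32, [4 x i8], i32 }
  @g = external global %struct.pair

  define <4 x i32> @gather_field(<4 x i1> %mask) {
    ; Splat one base pointer across all lanes.
    %b = insertelement <4 x %struct.pair*> undef, %struct.pair* @g, i32 0
    %base = shufflevector <4 x %struct.pair*> %b, <4 x %struct.pair*> undef, <4 x i32> zeroinitializer
    ; The trailing scalar index (i32 2) addresses a struct field, so the
    ; uniform base becomes @g plus the field's StructLayout offset.
    %ptrs = getelementptr %struct.pair, <4 x %struct.pair*> %base, <4 x i32> zeroinitializer, i32 2
    %v = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %ptrs, i32 4, <4 x i1> %mask, <4 x i32> undef)
    ret <4 x i32> %v
  }

  declare <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*>, i32, <4 x i1>, <4 x i32>)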
<br>
Reviewers: craig.topper, c-rhodes, RKSimon<br>
<br>
Subscribers: hiraditya, llvm-commits, annita.zhang, LuoYuanke<br>
<br>
Tags: #llvm<br>
<br>
Differential Revision: <a href="https://reviews.llvm.org/D71442" rel="noreferrer" target="_blank">https://reviews.llvm.org/D71442</a><br>
<br>
Added: <br>
<br>
<br>
Modified: <br>
llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp<br>
llvm/test/CodeGen/X86/masked_gather.ll<br>
<br>
Removed: <br>
<br>
<br>
<br>
################################################################################<br>
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp<br>
index 7b2430a60995..27ac489b35b7 100644<br>
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp<br>
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp<br>
@@ -4353,9 +4353,10 @@ static bool getUniformBase(const Value *&Ptr, SDValue &Base, SDValue &Index,<br>
<br>
unsigned FinalIndex = GEP->getNumOperands() - 1;<br>
Value *IndexVal = GEP->getOperand(FinalIndex);<br>
+ gep_type_iterator GTI = gep_type_begin(*GEP);<br>
<br>
// Ensure all the other indices are 0.<br>
- for (unsigned i = 1; i < FinalIndex; ++i) {<br>
+ for (unsigned i = 1; i < FinalIndex; ++i, ++GTI) {<br>
auto *C = dyn_cast<Constant>(GEP->getOperand(i));<br>
if (!C)<br>
return false;<br>
@@ -4368,18 +4369,39 @@ static bool getUniformBase(const Value *&Ptr, SDValue &Base, SDValue &Index,<br>
<br>
// The operands of the GEP may be defined in another basic block.<br>
// In this case we'll not find nodes for the operands.<br>
- if (!SDB->findValue(Ptr) || !SDB->findValue(IndexVal))<br>
+ if (!SDB->findValue(Ptr))<br>
+ return false;<br>
+ Constant *C = dyn_cast<Constant>(IndexVal);<br>
+ if (!C && !SDB->findValue(IndexVal))<br>
return false;<br>
<br>
const TargetLowering &TLI = DAG.getTargetLoweringInfo();<br>
const DataLayout &DL = DAG.getDataLayout();<br>
- Scale = DAG.getTargetConstant(DL.getTypeAllocSize(GEP->getResultElementType()),<br>
- SDB->getCurSDLoc(), TLI.getPointerTy(DL));<br>
+ StructType *STy = GTI.getStructTypeOrNull();<br>
+<br>
+ if (STy) {<br>
+ const StructLayout *SL = DL.getStructLayout(STy);<br>
+ if (isa<VectorType>(C->getType())) {<br>
+ C = C->getSplatValue();<br>
+      // FIXME: Can getSplatValue return nullptr for a struct index?<br>
+ // If not, the following check can be removed.<br>
+ if (!C)<br>
+ return false;<br>
+ }<br>
+ auto *CI = cast<ConstantInt>(C);<br>
+ Scale = DAG.getTargetConstant(1, SDB->getCurSDLoc(), TLI.getPointerTy(DL));<br>
+ Index = DAG.getTargetConstant(SL->getElementOffset(CI->getZExtValue()),<br>
+ SDB->getCurSDLoc(), TLI.getPointerTy(DL));<br>
+ } else {<br>
+ Scale = DAG.getTargetConstant(<br>
+ DL.getTypeAllocSize(GEP->getResultElementType()),<br>
+ SDB->getCurSDLoc(), TLI.getPointerTy(DL));<br>
+ Index = SDB->getValue(IndexVal);<br>
+ }<br>
Base = SDB->getValue(Ptr);<br>
- Index = SDB->getValue(IndexVal);<br>
IndexType = ISD::SIGNED_SCALED;<br>
<br>
- if (!Index.getValueType().isVector()) {<br>
+ if (STy || !Index.getValueType().isVector()) {<br>
unsigned GEPWidth = GEP->getType()->getVectorNumElements();<br>
EVT VT = EVT::getVectorVT(Context, Index.getValueType(), GEPWidth);<br>
Index = DAG.getSplatBuildVector(VT, SDLoc(Index), Index);<br>
<br>
diff --git a/llvm/test/CodeGen/X86/masked_gather.ll b/llvm/test/CodeGen/X86/masked_gather.ll<br>
index 0dabe546a145..dd41009a9aee 100644<br>
--- a/llvm/test/CodeGen/X86/masked_gather.ll<br>
+++ b/llvm/test/CodeGen/X86/masked_gather.ll<br>
@@ -8,6 +8,10 @@<br>
; vXf32<br>
;<br>
<br>
+%struct.a = type { [4 x i32], [4 x i8], %struct.b, i32 }<br>
+%struct.b = type { i32, i32 }<br>
+@c = external dso_local global %struct.a, align 4<br>
+<br>
define <4 x float> @gather_v4f32_ptr_v4i32(<4 x float*> %ptr, <4 x i32> %trigger, <4 x float> %passthru) {<br>
; SSE-LABEL: gather_v4f32_ptr_v4i32:<br>
; SSE: # %bb.0:<br>
@@ -1016,6 +1020,735 @@ define <16 x i8> @gather_v16i8_v16i32_v16i8(i8* %base, <16 x i32> %idx, <16 x i8<br>
ret <16 x i8> %res<br>
}<br>
<br>
+define <8 x i32> @gather_v8i32_v8i32(<8 x i32> %trigger) {<br>
+; SSE-LABEL: gather_v8i32_v8i32:<br>
+; SSE: # %bb.0:<br>
+; SSE-NEXT: movdqa %xmm1, %xmm3<br>
+; SSE-NEXT: movdqa %xmm0, %xmm2<br>
+; SSE-NEXT: movl $c, %eax<br>
+; SSE-NEXT: movq %rax, %xmm0<br>
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm0[0,1,0,1]<br>
+; SSE-NEXT: movdqa {{.*#+}} xmm5 = [12,12]<br>
+; SSE-NEXT: paddq %xmm4, %xmm5<br>
+; SSE-NEXT: pxor %xmm0, %xmm0<br>
+; SSE-NEXT: pcmpeqd %xmm0, %xmm1<br>
+; SSE-NEXT: pcmpeqd %xmm2, %xmm0<br>
+; SSE-NEXT: packssdw %xmm1, %xmm0<br>
+; SSE-NEXT: packsswb %xmm0, %xmm0<br>
+; SSE-NEXT: pmovmskb %xmm0, %eax<br>
+; SSE-NEXT: testb $1, %al<br>
+; SSE-NEXT: je .LBB4_1<br>
+; SSE-NEXT: # %bb.2: # %cond.load<br>
+; SSE-NEXT: movq %xmm5, %rcx<br>
+; SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero<br>
+; SSE-NEXT: testb $2, %al<br>
+; SSE-NEXT: jne .LBB4_4<br>
+; SSE-NEXT: jmp .LBB4_5<br>
+; SSE-NEXT: .LBB4_1:<br>
+; SSE-NEXT: # implicit-def: $xmm0<br>
+; SSE-NEXT: testb $2, %al<br>
+; SSE-NEXT: je .LBB4_5<br>
+; SSE-NEXT: .LBB4_4: # %cond.load1<br>
+; SSE-NEXT: pextrq $1, %xmm5, %rcx<br>
+; SSE-NEXT: pinsrd $1, (%rcx), %xmm0<br>
+; SSE-NEXT: .LBB4_5: # %else2<br>
+; SSE-NEXT: testb $4, %al<br>
+; SSE-NEXT: jne .LBB4_6<br>
+; SSE-NEXT: # %bb.7: # %else5<br>
+; SSE-NEXT: testb $8, %al<br>
+; SSE-NEXT: jne .LBB4_8<br>
+; SSE-NEXT: .LBB4_9: # %else8<br>
+; SSE-NEXT: testb $16, %al<br>
+; SSE-NEXT: je .LBB4_10<br>
+; SSE-NEXT: .LBB4_11: # %cond.load10<br>
+; SSE-NEXT: movq %xmm5, %rcx<br>
+; SSE-NEXT: pinsrd $0, (%rcx), %xmm1<br>
+; SSE-NEXT: testb $32, %al<br>
+; SSE-NEXT: jne .LBB4_13<br>
+; SSE-NEXT: jmp .LBB4_14<br>
+; SSE-NEXT: .LBB4_6: # %cond.load4<br>
+; SSE-NEXT: movq %xmm5, %rcx<br>
+; SSE-NEXT: pinsrd $2, (%rcx), %xmm0<br>
+; SSE-NEXT: testb $8, %al<br>
+; SSE-NEXT: je .LBB4_9<br>
+; SSE-NEXT: .LBB4_8: # %cond.load7<br>
+; SSE-NEXT: pextrq $1, %xmm5, %rcx<br>
+; SSE-NEXT: pinsrd $3, (%rcx), %xmm0<br>
+; SSE-NEXT: testb $16, %al<br>
+; SSE-NEXT: jne .LBB4_11<br>
+; SSE-NEXT: .LBB4_10:<br>
+; SSE-NEXT: # implicit-def: $xmm1<br>
+; SSE-NEXT: testb $32, %al<br>
+; SSE-NEXT: je .LBB4_14<br>
+; SSE-NEXT: .LBB4_13: # %cond.load13<br>
+; SSE-NEXT: pextrq $1, %xmm5, %rcx<br>
+; SSE-NEXT: pinsrd $1, (%rcx), %xmm1<br>
+; SSE-NEXT: .LBB4_14: # %else14<br>
+; SSE-NEXT: testb $64, %al<br>
+; SSE-NEXT: jne .LBB4_15<br>
+; SSE-NEXT: # %bb.16: # %else17<br>
+; SSE-NEXT: testb $-128, %al<br>
+; SSE-NEXT: je .LBB4_18<br>
+; SSE-NEXT: .LBB4_17: # %cond.load19<br>
+; SSE-NEXT: pextrq $1, %xmm5, %rax<br>
+; SSE-NEXT: pinsrd $3, (%rax), %xmm1<br>
+; SSE-NEXT: .LBB4_18: # %else20<br>
+; SSE-NEXT: pxor %xmm5, %xmm5<br>
+; SSE-NEXT: paddq {{.*}}(%rip), %xmm4<br>
+; SSE-NEXT: movdqa %xmm2, %xmm6<br>
+; SSE-NEXT: pcmpeqd %xmm5, %xmm6<br>
+; SSE-NEXT: pcmpeqd %xmm3, %xmm5<br>
+; SSE-NEXT: packssdw %xmm5, %xmm6<br>
+; SSE-NEXT: packsswb %xmm0, %xmm6<br>
+; SSE-NEXT: pmovmskb %xmm6, %eax<br>
+; SSE-NEXT: testb $1, %al<br>
+; SSE-NEXT: je .LBB4_19<br>
+; SSE-NEXT: # %bb.20: # %cond.load23<br>
+; SSE-NEXT: movq %xmm4, %rcx<br>
+; SSE-NEXT: movd {{.*#+}} xmm5 = mem[0],zero,zero,zero<br>
+; SSE-NEXT: testb $2, %al<br>
+; SSE-NEXT: jne .LBB4_22<br>
+; SSE-NEXT: jmp .LBB4_23<br>
+; SSE-NEXT: .LBB4_15: # %cond.load16<br>
+; SSE-NEXT: movq %xmm5, %rcx<br>
+; SSE-NEXT: pinsrd $2, (%rcx), %xmm1<br>
+; SSE-NEXT: testb $-128, %al<br>
+; SSE-NEXT: jne .LBB4_17<br>
+; SSE-NEXT: jmp .LBB4_18<br>
+; SSE-NEXT: .LBB4_19:<br>
+; SSE-NEXT: # implicit-def: $xmm5<br>
+; SSE-NEXT: testb $2, %al<br>
+; SSE-NEXT: je .LBB4_23<br>
+; SSE-NEXT: .LBB4_22: # %cond.load29<br>
+; SSE-NEXT: pextrq $1, %xmm4, %rcx<br>
+; SSE-NEXT: pinsrd $1, (%rcx), %xmm5<br>
+; SSE-NEXT: .LBB4_23: # %else33<br>
+; SSE-NEXT: testb $4, %al<br>
+; SSE-NEXT: jne .LBB4_24<br>
+; SSE-NEXT: # %bb.25: # %else39<br>
+; SSE-NEXT: testb $8, %al<br>
+; SSE-NEXT: jne .LBB4_26<br>
+; SSE-NEXT: .LBB4_27: # %else45<br>
+; SSE-NEXT: testb $16, %al<br>
+; SSE-NEXT: je .LBB4_28<br>
+; SSE-NEXT: .LBB4_29: # %cond.load47<br>
+; SSE-NEXT: movq %xmm4, %rcx<br>
+; SSE-NEXT: pinsrd $0, (%rcx), %xmm6<br>
+; SSE-NEXT: testb $32, %al<br>
+; SSE-NEXT: jne .LBB4_31<br>
+; SSE-NEXT: jmp .LBB4_32<br>
+; SSE-NEXT: .LBB4_24: # %cond.load35<br>
+; SSE-NEXT: movq %xmm4, %rcx<br>
+; SSE-NEXT: pinsrd $2, (%rcx), %xmm5<br>
+; SSE-NEXT: testb $8, %al<br>
+; SSE-NEXT: je .LBB4_27<br>
+; SSE-NEXT: .LBB4_26: # %cond.load41<br>
+; SSE-NEXT: pextrq $1, %xmm4, %rcx<br>
+; SSE-NEXT: pinsrd $3, (%rcx), %xmm5<br>
+; SSE-NEXT: testb $16, %al<br>
+; SSE-NEXT: jne .LBB4_29<br>
+; SSE-NEXT: .LBB4_28:<br>
+; SSE-NEXT: # implicit-def: $xmm6<br>
+; SSE-NEXT: testb $32, %al<br>
+; SSE-NEXT: je .LBB4_32<br>
+; SSE-NEXT: .LBB4_31: # %cond.load53<br>
+; SSE-NEXT: pextrq $1, %xmm4, %rcx<br>
+; SSE-NEXT: pinsrd $1, (%rcx), %xmm6<br>
+; SSE-NEXT: .LBB4_32: # %else57<br>
+; SSE-NEXT: testb $64, %al<br>
+; SSE-NEXT: jne .LBB4_33<br>
+; SSE-NEXT: # %bb.34: # %else63<br>
+; SSE-NEXT: testb $-128, %al<br>
+; SSE-NEXT: je .LBB4_36<br>
+; SSE-NEXT: .LBB4_35: # %cond.load65<br>
+; SSE-NEXT: pextrq $1, %xmm4, %rax<br>
+; SSE-NEXT: pinsrd $3, (%rax), %xmm6<br>
+; SSE-NEXT: .LBB4_36: # %else69<br>
+; SSE-NEXT: pxor %xmm7, %xmm7<br>
+; SSE-NEXT: pcmpeqd %xmm7, %xmm2<br>
+; SSE-NEXT: pcmpeqd %xmm7, %xmm3<br>
+; SSE-NEXT: packssdw %xmm3, %xmm2<br>
+; SSE-NEXT: packsswb %xmm0, %xmm2<br>
+; SSE-NEXT: pmovmskb %xmm2, %eax<br>
+; SSE-NEXT: testb $1, %al<br>
+; SSE-NEXT: je .LBB4_37<br>
+; SSE-NEXT: # %bb.38: # %cond.load72<br>
+; SSE-NEXT: movq %xmm4, %rcx<br>
+; SSE-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero<br>
+; SSE-NEXT: testb $2, %al<br>
+; SSE-NEXT: jne .LBB4_40<br>
+; SSE-NEXT: jmp .LBB4_41<br>
+; SSE-NEXT: .LBB4_33: # %cond.load59<br>
+; SSE-NEXT: movq %xmm4, %rcx<br>
+; SSE-NEXT: pinsrd $2, (%rcx), %xmm6<br>
+; SSE-NEXT: testb $-128, %al<br>
+; SSE-NEXT: jne .LBB4_35<br>
+; SSE-NEXT: jmp .LBB4_36<br>
+; SSE-NEXT: .LBB4_37:<br>
+; SSE-NEXT: # implicit-def: $xmm2<br>
+; SSE-NEXT: testb $2, %al<br>
+; SSE-NEXT: je .LBB4_41<br>
+; SSE-NEXT: .LBB4_40: # %cond.load78<br>
+; SSE-NEXT: pextrq $1, %xmm4, %rcx<br>
+; SSE-NEXT: pinsrd $1, (%rcx), %xmm2<br>
+; SSE-NEXT: .LBB4_41: # %else82<br>
+; SSE-NEXT: testb $4, %al<br>
+; SSE-NEXT: jne .LBB4_42<br>
+; SSE-NEXT: # %bb.43: # %else88<br>
+; SSE-NEXT: testb $8, %al<br>
+; SSE-NEXT: jne .LBB4_44<br>
+; SSE-NEXT: .LBB4_45: # %else94<br>
+; SSE-NEXT: testb $16, %al<br>
+; SSE-NEXT: je .LBB4_46<br>
+; SSE-NEXT: .LBB4_47: # %cond.load96<br>
+; SSE-NEXT: movq %xmm4, %rcx<br>
+; SSE-NEXT: pinsrd $0, (%rcx), %xmm3<br>
+; SSE-NEXT: testb $32, %al<br>
+; SSE-NEXT: jne .LBB4_49<br>
+; SSE-NEXT: jmp .LBB4_50<br>
+; SSE-NEXT: .LBB4_42: # %cond.load84<br>
+; SSE-NEXT: movq %xmm4, %rcx<br>
+; SSE-NEXT: pinsrd $2, (%rcx), %xmm2<br>
+; SSE-NEXT: testb $8, %al<br>
+; SSE-NEXT: je .LBB4_45<br>
+; SSE-NEXT: .LBB4_44: # %cond.load90<br>
+; SSE-NEXT: pextrq $1, %xmm4, %rcx<br>
+; SSE-NEXT: pinsrd $3, (%rcx), %xmm2<br>
+; SSE-NEXT: testb $16, %al<br>
+; SSE-NEXT: jne .LBB4_47<br>
+; SSE-NEXT: .LBB4_46:<br>
+; SSE-NEXT: # implicit-def: $xmm3<br>
+; SSE-NEXT: testb $32, %al<br>
+; SSE-NEXT: je .LBB4_50<br>
+; SSE-NEXT: .LBB4_49: # %cond.load102<br>
+; SSE-NEXT: pextrq $1, %xmm4, %rcx<br>
+; SSE-NEXT: pinsrd $1, (%rcx), %xmm3<br>
+; SSE-NEXT: .LBB4_50: # %else106<br>
+; SSE-NEXT: testb $64, %al<br>
+; SSE-NEXT: je .LBB4_52<br>
+; SSE-NEXT: # %bb.51: # %cond.load108<br>
+; SSE-NEXT: movq %xmm4, %rcx<br>
+; SSE-NEXT: pinsrd $2, (%rcx), %xmm3<br>
+; SSE-NEXT: .LBB4_52: # %else112<br>
+; SSE-NEXT: paddd %xmm5, %xmm0<br>
+; SSE-NEXT: paddd %xmm6, %xmm1<br>
+; SSE-NEXT: testb $-128, %al<br>
+; SSE-NEXT: je .LBB4_54<br>
+; SSE-NEXT: # %bb.53: # %cond.load114<br>
+; SSE-NEXT: pextrq $1, %xmm4, %rax<br>
+; SSE-NEXT: pinsrd $3, (%rax), %xmm3<br>
+; SSE-NEXT: .LBB4_54: # %else118<br>
+; SSE-NEXT: paddd %xmm3, %xmm1<br>
+; SSE-NEXT: paddd %xmm2, %xmm0<br>
+; SSE-NEXT: retq<br>
+;<br>
+; AVX1-LABEL: gather_v8i32_v8i32:<br>
+; AVX1: # %bb.0:<br>
+; AVX1-NEXT: movl $c, %eax<br>
+; AVX1-NEXT: vmovq %rax, %xmm1<br>
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[0,1,0,1]<br>
+; AVX1-NEXT: vpaddq {{.*}}(%rip), %xmm3, %xmm1<br>
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm4<br>
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm9<br>
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1<br>
+; AVX1-NEXT: vpcmpeqd %xmm1, %xmm9, %xmm5<br>
+; AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm1<br>
+; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm1, %ymm1<br>
+; AVX1-NEXT: vmovmskps %ymm1, %eax<br>
+; AVX1-NEXT: testb $1, %al<br>
+; AVX1-NEXT: # implicit-def: $ymm1<br>
+; AVX1-NEXT: je .LBB4_2<br>
+; AVX1-NEXT: # %bb.1: # %cond.load<br>
+; AVX1-NEXT: vmovq %xmm4, %rcx<br>
+; AVX1-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero<br>
+; AVX1-NEXT: .LBB4_2: # %else<br>
+; AVX1-NEXT: testb $2, %al<br>
+; AVX1-NEXT: je .LBB4_4<br>
+; AVX1-NEXT: # %bb.3: # %cond.load1<br>
+; AVX1-NEXT: vpextrq $1, %xmm4, %rcx<br>
+; AVX1-NEXT: vpinsrd $1, (%rcx), %xmm1, %xmm5<br>
+; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm5[0,1,2,3],ymm1[4,5,6,7]<br>
+; AVX1-NEXT: .LBB4_4: # %else2<br>
+; AVX1-NEXT: testb $4, %al<br>
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm6<br>
+; AVX1-NEXT: jne .LBB4_5<br>
+; AVX1-NEXT: # %bb.6: # %else5<br>
+; AVX1-NEXT: testb $8, %al<br>
+; AVX1-NEXT: jne .LBB4_7<br>
+; AVX1-NEXT: .LBB4_8: # %else8<br>
+; AVX1-NEXT: testb $16, %al<br>
+; AVX1-NEXT: jne .LBB4_9<br>
+; AVX1-NEXT: .LBB4_10: # %else11<br>
+; AVX1-NEXT: testb $32, %al<br>
+; AVX1-NEXT: jne .LBB4_11<br>
+; AVX1-NEXT: .LBB4_12: # %else14<br>
+; AVX1-NEXT: testb $64, %al<br>
+; AVX1-NEXT: je .LBB4_14<br>
+; AVX1-NEXT: .LBB4_13: # %cond.load16<br>
+; AVX1-NEXT: vmovq %xmm6, %rcx<br>
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4<br>
+; AVX1-NEXT: vpinsrd $2, (%rcx), %xmm4, %xmm4<br>
+; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm1, %ymm1<br>
+; AVX1-NEXT: .LBB4_14: # %else17<br>
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm3, %ymm5<br>
+; AVX1-NEXT: testb $-128, %al<br>
+; AVX1-NEXT: je .LBB4_16<br>
+; AVX1-NEXT: # %bb.15: # %cond.load19<br>
+; AVX1-NEXT: vpextrq $1, %xmm6, %rax<br>
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3<br>
+; AVX1-NEXT: vpinsrd $3, (%rax), %xmm3, %xmm3<br>
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1<br>
+; AVX1-NEXT: .LBB4_16: # %else20<br>
+; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm3<br>
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm10 = [28,28]<br>
+; AVX1-NEXT: vpaddq %xmm3, %xmm10, %xmm3<br>
+; AVX1-NEXT: vpaddq %xmm5, %xmm10, %xmm4<br>
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm7<br>
+; AVX1-NEXT: vpxor %xmm4, %xmm4, %xmm4<br>
+; AVX1-NEXT: vpcmpeqd %xmm4, %xmm9, %xmm2<br>
+; AVX1-NEXT: vpcmpeqd %xmm4, %xmm0, %xmm4<br>
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm4, %ymm2<br>
+; AVX1-NEXT: vmovmskps %ymm2, %eax<br>
+; AVX1-NEXT: testb $1, %al<br>
+; AVX1-NEXT: # implicit-def: $ymm4<br>
+; AVX1-NEXT: je .LBB4_18<br>
+; AVX1-NEXT: # %bb.17: # %cond.load23<br>
+; AVX1-NEXT: vmovq %xmm7, %rcx<br>
+; AVX1-NEXT: vmovd {{.*#+}} xmm4 = mem[0],zero,zero,zero<br>
+; AVX1-NEXT: .LBB4_18: # %else27<br>
+; AVX1-NEXT: testb $2, %al<br>
+; AVX1-NEXT: je .LBB4_20<br>
+; AVX1-NEXT: # %bb.19: # %cond.load29<br>
+; AVX1-NEXT: vpextrq $1, %xmm7, %rcx<br>
+; AVX1-NEXT: vpinsrd $1, (%rcx), %xmm4, %xmm2<br>
+; AVX1-NEXT: vblendps {{.*#+}} ymm4 = ymm2[0,1,2,3],ymm4[4,5,6,7]<br>
+; AVX1-NEXT: .LBB4_20: # %else33<br>
+; AVX1-NEXT: vpaddq %xmm5, %xmm10, %xmm8<br>
+; AVX1-NEXT: testb $4, %al<br>
+; AVX1-NEXT: vextractf128 $1, %ymm7, %xmm7<br>
+; AVX1-NEXT: je .LBB4_22<br>
+; AVX1-NEXT: # %bb.21: # %cond.load35<br>
+; AVX1-NEXT: vmovq %xmm7, %rcx<br>
+; AVX1-NEXT: vpinsrd $2, (%rcx), %xmm4, %xmm2<br>
+; AVX1-NEXT: vblendps {{.*#+}} ymm4 = ymm2[0,1,2,3],ymm4[4,5,6,7]<br>
+; AVX1-NEXT: .LBB4_22: # %else39<br>
+; AVX1-NEXT: testb $8, %al<br>
+; AVX1-NEXT: je .LBB4_24<br>
+; AVX1-NEXT: # %bb.23: # %cond.load41<br>
+; AVX1-NEXT: vpextrq $1, %xmm7, %rcx<br>
+; AVX1-NEXT: vpinsrd $3, (%rcx), %xmm4, %xmm2<br>
+; AVX1-NEXT: vblendps {{.*#+}} ymm4 = ymm2[0,1,2,3],ymm4[4,5,6,7]<br>
+; AVX1-NEXT: .LBB4_24: # %else45<br>
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm8, %ymm7<br>
+; AVX1-NEXT: testb $16, %al<br>
+; AVX1-NEXT: je .LBB4_26<br>
+; AVX1-NEXT: # %bb.25: # %cond.load47<br>
+; AVX1-NEXT: vmovq %xmm7, %rcx<br>
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm2<br>
+; AVX1-NEXT: vpinsrd $0, (%rcx), %xmm2, %xmm2<br>
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm4, %ymm4<br>
+; AVX1-NEXT: .LBB4_26: # %else51<br>
+; AVX1-NEXT: testb $32, %al<br>
+; AVX1-NEXT: je .LBB4_28<br>
+; AVX1-NEXT: # %bb.27: # %cond.load53<br>
+; AVX1-NEXT: vpextrq $1, %xmm7, %rcx<br>
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm2<br>
+; AVX1-NEXT: vpinsrd $1, (%rcx), %xmm2, %xmm2<br>
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm4, %ymm4<br>
+; AVX1-NEXT: .LBB4_28: # %else57<br>
+; AVX1-NEXT: testb $64, %al<br>
+; AVX1-NEXT: vextractf128 $1, %ymm7, %xmm7<br>
+; AVX1-NEXT: je .LBB4_30<br>
+; AVX1-NEXT: # %bb.29: # %cond.load59<br>
+; AVX1-NEXT: vmovq %xmm7, %rcx<br>
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm2<br>
+; AVX1-NEXT: vpinsrd $2, (%rcx), %xmm2, %xmm2<br>
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm4, %ymm4<br>
+; AVX1-NEXT: .LBB4_30: # %else63<br>
+; AVX1-NEXT: testb $-128, %al<br>
+; AVX1-NEXT: je .LBB4_32<br>
+; AVX1-NEXT: # %bb.31: # %cond.load65<br>
+; AVX1-NEXT: vpextrq $1, %xmm7, %rax<br>
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm2<br>
+; AVX1-NEXT: vpinsrd $3, (%rax), %xmm2, %xmm2<br>
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm4, %ymm4<br>
+; AVX1-NEXT: .LBB4_32: # %else69<br>
+; AVX1-NEXT: vpaddq %xmm5, %xmm10, %xmm2<br>
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm7<br>
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2<br>
+; AVX1-NEXT: vpcmpeqd %xmm2, %xmm9, %xmm6<br>
+; AVX1-NEXT: vpcmpeqd %xmm2, %xmm0, %xmm0<br>
+; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm0<br>
+; AVX1-NEXT: vmovmskps %ymm0, %eax<br>
+; AVX1-NEXT: testb $1, %al<br>
+; AVX1-NEXT: # implicit-def: $ymm0<br>
+; AVX1-NEXT: je .LBB4_34<br>
+; AVX1-NEXT: # %bb.33: # %cond.load72<br>
+; AVX1-NEXT: vmovq %xmm7, %rcx<br>
+; AVX1-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero<br>
+; AVX1-NEXT: .LBB4_34: # %else76<br>
+; AVX1-NEXT: testb $2, %al<br>
+; AVX1-NEXT: je .LBB4_36<br>
+; AVX1-NEXT: # %bb.35: # %cond.load78<br>
+; AVX1-NEXT: vpextrq $1, %xmm7, %rcx<br>
+; AVX1-NEXT: vpinsrd $1, (%rcx), %xmm0, %xmm2<br>
+; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]<br>
+; AVX1-NEXT: .LBB4_36: # %else82<br>
+; AVX1-NEXT: vpaddq %xmm5, %xmm10, %xmm2<br>
+; AVX1-NEXT: testb $4, %al<br>
+; AVX1-NEXT: vextractf128 $1, %ymm7, %xmm5<br>
+; AVX1-NEXT: je .LBB4_38<br>
+; AVX1-NEXT: # %bb.37: # %cond.load84<br>
+; AVX1-NEXT: vmovq %xmm5, %rcx<br>
+; AVX1-NEXT: vpinsrd $2, (%rcx), %xmm0, %xmm6<br>
+; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3],ymm0[4,5,6,7]<br>
+; AVX1-NEXT: .LBB4_38: # %else88<br>
+; AVX1-NEXT: testb $8, %al<br>
+; AVX1-NEXT: je .LBB4_40<br>
+; AVX1-NEXT: # %bb.39: # %cond.load90<br>
+; AVX1-NEXT: vpextrq $1, %xmm5, %rcx<br>
+; AVX1-NEXT: vpinsrd $3, (%rcx), %xmm0, %xmm5<br>
+; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1,2,3],ymm0[4,5,6,7]<br>
+; AVX1-NEXT: .LBB4_40: # %else94<br>
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2<br>
+; AVX1-NEXT: testb $16, %al<br>
+; AVX1-NEXT: je .LBB4_42<br>
+; AVX1-NEXT: # %bb.41: # %cond.load96<br>
+; AVX1-NEXT: vmovq %xmm2, %rcx<br>
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3<br>
+; AVX1-NEXT: vpinsrd $0, (%rcx), %xmm3, %xmm3<br>
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0<br>
+; AVX1-NEXT: .LBB4_42: # %else100<br>
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm3<br>
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5<br>
+; AVX1-NEXT: vpaddd %xmm4, %xmm1, %xmm1<br>
+; AVX1-NEXT: testb $32, %al<br>
+; AVX1-NEXT: je .LBB4_44<br>
+; AVX1-NEXT: # %bb.43: # %cond.load102<br>
+; AVX1-NEXT: vpextrq $1, %xmm2, %rcx<br>
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4<br>
+; AVX1-NEXT: vpinsrd $1, (%rcx), %xmm4, %xmm4<br>
+; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0<br>
+; AVX1-NEXT: .LBB4_44: # %else106<br>
+; AVX1-NEXT: vpaddd %xmm3, %xmm5, %xmm3<br>
+; AVX1-NEXT: testb $64, %al<br>
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm2<br>
+; AVX1-NEXT: je .LBB4_46<br>
+; AVX1-NEXT: # %bb.45: # %cond.load108<br>
+; AVX1-NEXT: vmovq %xmm2, %rcx<br>
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4<br>
+; AVX1-NEXT: vpinsrd $2, (%rcx), %xmm4, %xmm4<br>
+; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0<br>
+; AVX1-NEXT: .LBB4_46: # %else112<br>
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1<br>
+; AVX1-NEXT: testb $-128, %al<br>
+; AVX1-NEXT: je .LBB4_48<br>
+; AVX1-NEXT: # %bb.47: # %cond.load114<br>
+; AVX1-NEXT: vpextrq $1, %xmm2, %rax<br>
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2<br>
+; AVX1-NEXT: vpinsrd $3, (%rax), %xmm2, %xmm2<br>
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0<br>
+; AVX1-NEXT: .LBB4_48: # %else118<br>
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2<br>
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3<br>
+; AVX1-NEXT: vpaddd %xmm2, %xmm3, %xmm2<br>
+; AVX1-NEXT: vpaddd %xmm0, %xmm1, %xmm0<br>
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0<br>
+; AVX1-NEXT: retq<br>
+; AVX1-NEXT: .LBB4_5: # %cond.load4<br>
+; AVX1-NEXT: vmovq %xmm6, %rcx<br>
+; AVX1-NEXT: vpinsrd $2, (%rcx), %xmm1, %xmm5<br>
+; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm5[0,1,2,3],ymm1[4,5,6,7]<br>
+; AVX1-NEXT: testb $8, %al<br>
+; AVX1-NEXT: je .LBB4_8<br>
+; AVX1-NEXT: .LBB4_7: # %cond.load7<br>
+; AVX1-NEXT: vpextrq $1, %xmm6, %rcx<br>
+; AVX1-NEXT: vpinsrd $3, (%rcx), %xmm1, %xmm5<br>
+; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm5[0,1,2,3],ymm1[4,5,6,7]<br>
+; AVX1-NEXT: testb $16, %al<br>
+; AVX1-NEXT: je .LBB4_10<br>
+; AVX1-NEXT: .LBB4_9: # %cond.load10<br>
+; AVX1-NEXT: vmovq %xmm4, %rcx<br>
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5<br>
+; AVX1-NEXT: vpinsrd $0, (%rcx), %xmm5, %xmm5<br>
+; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm1, %ymm1<br>
+; AVX1-NEXT: testb $32, %al<br>
+; AVX1-NEXT: je .LBB4_12<br>
+; AVX1-NEXT: .LBB4_11: # %cond.load13<br>
+; AVX1-NEXT: vpextrq $1, %xmm4, %rcx<br>
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4<br>
+; AVX1-NEXT: vpinsrd $1, (%rcx), %xmm4, %xmm4<br>
+; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm1, %ymm1<br>
+; AVX1-NEXT: testb $64, %al<br>
+; AVX1-NEXT: jne .LBB4_13<br>
+; AVX1-NEXT: jmp .LBB4_14<br>
+;<br>
+; AVX2-LABEL: gather_v8i32_v8i32:<br>
+; AVX2: # %bb.0:<br>
+; AVX2-NEXT: movl $c, %eax<br>
+; AVX2-NEXT: vmovq %rax, %xmm1<br>
+; AVX2-NEXT: vpbroadcastq %xmm1, %ymm2<br>
+; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm1 = [12,12,12,12]<br>
+; AVX2-NEXT: vpaddq %ymm1, %ymm2, %ymm3<br>
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1<br>
+; AVX2-NEXT: vpcmpeqd %ymm1, %ymm0, %ymm1<br>
+; AVX2-NEXT: vmovmskps %ymm1, %eax<br>
+; AVX2-NEXT: testb $1, %al<br>
+; AVX2-NEXT: # implicit-def: $ymm1<br>
+; AVX2-NEXT: je .LBB4_2<br>
+; AVX2-NEXT: # %bb.1: # %cond.load<br>
+; AVX2-NEXT: vmovq %xmm3, %rcx<br>
+; AVX2-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero<br>
+; AVX2-NEXT: .LBB4_2: # %else<br>
+; AVX2-NEXT: testb $2, %al<br>
+; AVX2-NEXT: je .LBB4_4<br>
+; AVX2-NEXT: # %bb.3: # %cond.load1<br>
+; AVX2-NEXT: vpextrq $1, %xmm3, %rcx<br>
+; AVX2-NEXT: vpinsrd $1, (%rcx), %xmm1, %xmm4<br>
+; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm4[0,1,2,3],ymm1[4,5,6,7]<br>
+; AVX2-NEXT: .LBB4_4: # %else2<br>
+; AVX2-NEXT: testb $4, %al<br>
+; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm4<br>
+; AVX2-NEXT: jne .LBB4_5<br>
+; AVX2-NEXT: # %bb.6: # %else5<br>
+; AVX2-NEXT: testb $8, %al<br>
+; AVX2-NEXT: jne .LBB4_7<br>
+; AVX2-NEXT: .LBB4_8: # %else8<br>
+; AVX2-NEXT: testb $16, %al<br>
+; AVX2-NEXT: jne .LBB4_9<br>
+; AVX2-NEXT: .LBB4_10: # %else11<br>
+; AVX2-NEXT: testb $32, %al<br>
+; AVX2-NEXT: jne .LBB4_11<br>
+; AVX2-NEXT: .LBB4_12: # %else14<br>
+; AVX2-NEXT: testb $64, %al<br>
+; AVX2-NEXT: jne .LBB4_13<br>
+; AVX2-NEXT: .LBB4_14: # %else17<br>
+; AVX2-NEXT: testb $-128, %al<br>
+; AVX2-NEXT: je .LBB4_16<br>
+; AVX2-NEXT: .LBB4_15: # %cond.load19<br>
+; AVX2-NEXT: vpextrq $1, %xmm4, %rax<br>
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3<br>
+; AVX2-NEXT: vpinsrd $3, (%rax), %xmm3, %xmm3<br>
+; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm1, %ymm1<br>
+; AVX2-NEXT: .LBB4_16: # %else20<br>
+; AVX2-NEXT: vpxor %xmm4, %xmm4, %xmm4<br>
+; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm3 = [28,28,28,28]<br>
+; AVX2-NEXT: vpaddq %ymm3, %ymm2, %ymm3<br>
+; AVX2-NEXT: vpcmpeqd %ymm4, %ymm0, %ymm2<br>
+; AVX2-NEXT: vmovmskps %ymm2, %eax<br>
+; AVX2-NEXT: testb $1, %al<br>
+; AVX2-NEXT: # implicit-def: $ymm2<br>
+; AVX2-NEXT: je .LBB4_18<br>
+; AVX2-NEXT: # %bb.17: # %cond.load23<br>
+; AVX2-NEXT: vmovq %xmm3, %rcx<br>
+; AVX2-NEXT: vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero<br>
+; AVX2-NEXT: .LBB4_18: # %else27<br>
+; AVX2-NEXT: testb $2, %al<br>
+; AVX2-NEXT: je .LBB4_20<br>
+; AVX2-NEXT: # %bb.19: # %cond.load29<br>
+; AVX2-NEXT: vpextrq $1, %xmm3, %rcx<br>
+; AVX2-NEXT: vpinsrd $1, (%rcx), %xmm2, %xmm4<br>
+; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm4[0,1,2,3],ymm2[4,5,6,7]<br>
+; AVX2-NEXT: .LBB4_20: # %else33<br>
+; AVX2-NEXT: testb $4, %al<br>
+; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm4<br>
+; AVX2-NEXT: jne .LBB4_21<br>
+; AVX2-NEXT: # %bb.22: # %else39<br>
+; AVX2-NEXT: testb $8, %al<br>
+; AVX2-NEXT: jne .LBB4_23<br>
+; AVX2-NEXT: .LBB4_24: # %else45<br>
+; AVX2-NEXT: testb $16, %al<br>
+; AVX2-NEXT: jne .LBB4_25<br>
+; AVX2-NEXT: .LBB4_26: # %else51<br>
+; AVX2-NEXT: testb $32, %al<br>
+; AVX2-NEXT: jne .LBB4_27<br>
+; AVX2-NEXT: .LBB4_28: # %else57<br>
+; AVX2-NEXT: testb $64, %al<br>
+; AVX2-NEXT: jne .LBB4_29<br>
+; AVX2-NEXT: .LBB4_30: # %else63<br>
+; AVX2-NEXT: testb $-128, %al<br>
+; AVX2-NEXT: je .LBB4_32<br>
+; AVX2-NEXT: .LBB4_31: # %cond.load65<br>
+; AVX2-NEXT: vpextrq $1, %xmm4, %rax<br>
+; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm5<br>
+; AVX2-NEXT: vpinsrd $3, (%rax), %xmm5, %xmm5<br>
+; AVX2-NEXT: vinserti128 $1, %xmm5, %ymm2, %ymm2<br>
+; AVX2-NEXT: .LBB4_32: # %else69<br>
+; AVX2-NEXT: vpxor %xmm5, %xmm5, %xmm5<br>
+; AVX2-NEXT: vpcmpeqd %ymm5, %ymm0, %ymm0<br>
+; AVX2-NEXT: vmovmskps %ymm0, %eax<br>
+; AVX2-NEXT: testb $1, %al<br>
+; AVX2-NEXT: # implicit-def: $ymm0<br>
+; AVX2-NEXT: jne .LBB4_33<br>
+; AVX2-NEXT: # %bb.34: # %else76<br>
+; AVX2-NEXT: testb $2, %al<br>
+; AVX2-NEXT: jne .LBB4_35<br>
+; AVX2-NEXT: .LBB4_36: # %else82<br>
+; AVX2-NEXT: testb $4, %al<br>
+; AVX2-NEXT: jne .LBB4_37<br>
+; AVX2-NEXT: .LBB4_38: # %else88<br>
+; AVX2-NEXT: testb $8, %al<br>
+; AVX2-NEXT: jne .LBB4_39<br>
+; AVX2-NEXT: .LBB4_40: # %else94<br>
+; AVX2-NEXT: testb $16, %al<br>
+; AVX2-NEXT: jne .LBB4_41<br>
+; AVX2-NEXT: .LBB4_42: # %else100<br>
+; AVX2-NEXT: testb $32, %al<br>
+; AVX2-NEXT: jne .LBB4_43<br>
+; AVX2-NEXT: .LBB4_44: # %else106<br>
+; AVX2-NEXT: testb $64, %al<br>
+; AVX2-NEXT: je .LBB4_46<br>
+; AVX2-NEXT: .LBB4_45: # %cond.load108<br>
+; AVX2-NEXT: vmovq %xmm4, %rcx<br>
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm3<br>
+; AVX2-NEXT: vpinsrd $2, (%rcx), %xmm3, %xmm3<br>
+; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm0<br>
+; AVX2-NEXT: .LBB4_46: # %else112<br>
+; AVX2-NEXT: vpaddd %ymm2, %ymm1, %ymm1<br>
+; AVX2-NEXT: testb $-128, %al<br>
+; AVX2-NEXT: je .LBB4_48<br>
+; AVX2-NEXT: # %bb.47: # %cond.load114<br>
+; AVX2-NEXT: vpextrq $1, %xmm4, %rax<br>
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2<br>
+; AVX2-NEXT: vpinsrd $3, (%rax), %xmm2, %xmm2<br>
+; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0<br>
+; AVX2-NEXT: .LBB4_48: # %else118<br>
+; AVX2-NEXT: vpaddd %ymm0, %ymm1, %ymm0<br>
+; AVX2-NEXT: retq<br>
+; AVX2-NEXT: .LBB4_5: # %cond.load4<br>
+; AVX2-NEXT: vmovq %xmm4, %rcx<br>
+; AVX2-NEXT: vpinsrd $2, (%rcx), %xmm1, %xmm5<br>
+; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm5[0,1,2,3],ymm1[4,5,6,7]<br>
+; AVX2-NEXT: testb $8, %al<br>
+; AVX2-NEXT: je .LBB4_8<br>
+; AVX2-NEXT: .LBB4_7: # %cond.load7<br>
+; AVX2-NEXT: vpextrq $1, %xmm4, %rcx<br>
+; AVX2-NEXT: vpinsrd $3, (%rcx), %xmm1, %xmm5<br>
+; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm5[0,1,2,3],ymm1[4,5,6,7]<br>
+; AVX2-NEXT: testb $16, %al<br>
+; AVX2-NEXT: je .LBB4_10<br>
+; AVX2-NEXT: .LBB4_9: # %cond.load10<br>
+; AVX2-NEXT: vmovq %xmm3, %rcx<br>
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm5<br>
+; AVX2-NEXT: vpinsrd $0, (%rcx), %xmm5, %xmm5<br>
+; AVX2-NEXT: vinserti128 $1, %xmm5, %ymm1, %ymm1<br>
+; AVX2-NEXT: testb $32, %al<br>
+; AVX2-NEXT: je .LBB4_12<br>
+; AVX2-NEXT: .LBB4_11: # %cond.load13<br>
+; AVX2-NEXT: vpextrq $1, %xmm3, %rcx<br>
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3<br>
+; AVX2-NEXT: vpinsrd $1, (%rcx), %xmm3, %xmm3<br>
+; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm1, %ymm1<br>
+; AVX2-NEXT: testb $64, %al<br>
+; AVX2-NEXT: je .LBB4_14<br>
+; AVX2-NEXT: .LBB4_13: # %cond.load16<br>
+; AVX2-NEXT: vmovq %xmm4, %rcx<br>
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3<br>
+; AVX2-NEXT: vpinsrd $2, (%rcx), %xmm3, %xmm3<br>
+; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm1, %ymm1<br>
+; AVX2-NEXT: testb $-128, %al<br>
+; AVX2-NEXT: jne .LBB4_15<br>
+; AVX2-NEXT: jmp .LBB4_16<br>
+; AVX2-NEXT: .LBB4_21: # %cond.load35<br>
+; AVX2-NEXT: vmovq %xmm4, %rcx<br>
+; AVX2-NEXT: vpinsrd $2, (%rcx), %xmm2, %xmm5<br>
+; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm5[0,1,2,3],ymm2[4,5,6,7]<br>
+; AVX2-NEXT: testb $8, %al<br>
+; AVX2-NEXT: je .LBB4_24<br>
+; AVX2-NEXT: .LBB4_23: # %cond.load41<br>
+; AVX2-NEXT: vpextrq $1, %xmm4, %rcx<br>
+; AVX2-NEXT: vpinsrd $3, (%rcx), %xmm2, %xmm5<br>
+; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm5[0,1,2,3],ymm2[4,5,6,7]<br>
+; AVX2-NEXT: testb $16, %al<br>
+; AVX2-NEXT: je .LBB4_26<br>
+; AVX2-NEXT: .LBB4_25: # %cond.load47<br>
+; AVX2-NEXT: vmovq %xmm3, %rcx<br>
+; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm5<br>
+; AVX2-NEXT: vpinsrd $0, (%rcx), %xmm5, %xmm5<br>
+; AVX2-NEXT: vinserti128 $1, %xmm5, %ymm2, %ymm2<br>
+; AVX2-NEXT: testb $32, %al<br>
+; AVX2-NEXT: je .LBB4_28<br>
+; AVX2-NEXT: .LBB4_27: # %cond.load53<br>
+; AVX2-NEXT: vpextrq $1, %xmm3, %rcx<br>
+; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm5<br>
+; AVX2-NEXT: vpinsrd $1, (%rcx), %xmm5, %xmm5<br>
+; AVX2-NEXT: vinserti128 $1, %xmm5, %ymm2, %ymm2<br>
+; AVX2-NEXT: testb $64, %al<br>
+; AVX2-NEXT: je .LBB4_30<br>
+; AVX2-NEXT: .LBB4_29: # %cond.load59<br>
+; AVX2-NEXT: vmovq %xmm4, %rcx<br>
+; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm5<br>
+; AVX2-NEXT: vpinsrd $2, (%rcx), %xmm5, %xmm5<br>
+; AVX2-NEXT: vinserti128 $1, %xmm5, %ymm2, %ymm2<br>
+; AVX2-NEXT: testb $-128, %al<br>
+; AVX2-NEXT: jne .LBB4_31<br>
+; AVX2-NEXT: jmp .LBB4_32<br>
+; AVX2-NEXT: .LBB4_33: # %cond.load72<br>
+; AVX2-NEXT: vmovq %xmm3, %rcx<br>
+; AVX2-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero<br>
+; AVX2-NEXT: testb $2, %al<br>
+; AVX2-NEXT: je .LBB4_36<br>
+; AVX2-NEXT: .LBB4_35: # %cond.load78<br>
+; AVX2-NEXT: vpextrq $1, %xmm3, %rcx<br>
+; AVX2-NEXT: vpinsrd $1, (%rcx), %xmm0, %xmm5<br>
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm5[0,1,2,3],ymm0[4,5,6,7]<br>
+; AVX2-NEXT: testb $4, %al<br>
+; AVX2-NEXT: je .LBB4_38<br>
+; AVX2-NEXT: .LBB4_37: # %cond.load84<br>
+; AVX2-NEXT: vmovq %xmm4, %rcx<br>
+; AVX2-NEXT: vpinsrd $2, (%rcx), %xmm0, %xmm5<br>
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm5[0,1,2,3],ymm0[4,5,6,7]<br>
+; AVX2-NEXT: testb $8, %al<br>
+; AVX2-NEXT: je .LBB4_40<br>
+; AVX2-NEXT: .LBB4_39: # %cond.load90<br>
+; AVX2-NEXT: vpextrq $1, %xmm4, %rcx<br>
+; AVX2-NEXT: vpinsrd $3, (%rcx), %xmm0, %xmm5<br>
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm5[0,1,2,3],ymm0[4,5,6,7]<br>
+; AVX2-NEXT: testb $16, %al<br>
+; AVX2-NEXT: je .LBB4_42<br>
+; AVX2-NEXT: .LBB4_41: # %cond.load96<br>
+; AVX2-NEXT: vmovq %xmm3, %rcx<br>
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm5<br>
+; AVX2-NEXT: vpinsrd $0, (%rcx), %xmm5, %xmm5<br>
+; AVX2-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm0<br>
+; AVX2-NEXT: testb $32, %al<br>
+; AVX2-NEXT: je .LBB4_44<br>
+; AVX2-NEXT: .LBB4_43: # %cond.load102<br>
+; AVX2-NEXT: vpextrq $1, %xmm3, %rcx<br>
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm3<br>
+; AVX2-NEXT: vpinsrd $1, (%rcx), %xmm3, %xmm3<br>
+; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm0<br>
+; AVX2-NEXT: testb $64, %al<br>
+; AVX2-NEXT: jne .LBB4_45<br>
+; AVX2-NEXT: jmp .LBB4_46<br>
+;<br>
+; AVX512-LABEL: gather_v8i32_v8i32:<br>
+; AVX512: # %bb.0:<br>
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0<br>
+; AVX512-NEXT: vptestnmd %zmm0, %zmm0, %k1<br>
+; AVX512-NEXT: kshiftlw $8, %k1, %k0<br>
+; AVX512-NEXT: kshiftrw $8, %k0, %k2<br>
+; AVX512-NEXT: vpbroadcastd {{.*#+}} zmm0 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3]<br>
+; AVX512-NEXT: vpgatherdd c(,%zmm0,4), %zmm1 {%k2}<br>
+; AVX512-NEXT: vpbroadcastq $28, %zmm0<br>
+; AVX512-NEXT: vpgatherqd c(,%zmm0), %ymm2 {%k1}<br>
+; AVX512-NEXT: vpaddd %ymm2, %ymm2, %ymm0<br>
+; AVX512-NEXT: vpaddd %ymm0, %ymm1, %ymm0<br>
+; AVX512-NEXT: retq<br>
+ %1 = insertelement <8 x %struct.a*> undef, %struct.a* @c, i32 0<br>
+ %2 = shufflevector <8 x %struct.a*> %1, <8 x %struct.a*> undef, <8 x i32> zeroinitializer<br>
+ %3 = getelementptr %struct.a, <8 x %struct.a*> %2, <8 x i32> zeroinitializer, i32 0, i32 3<br>
+ %4 = icmp eq <8 x i32> %trigger, zeroinitializer<br>
+ %5 = call <8 x i32> @llvm.masked.gather.v8i32.v8p0i32(<8 x i32*> %3, i32 4, <8 x i1> %4, <8 x i32> undef)<br>
+ %6 = getelementptr %struct.a, <8 x %struct.a*> %2, <8 x i32> zeroinitializer, i32 3<br>
+ %7 = call <8 x i32> @llvm.masked.gather.v8i32.v8p0i32(<8 x i32*> %6, i32 4, <8 x i1> %4, <8 x i32> undef)<br>
+ %8 = add <8 x i32> %5, %7<br>
+ %9 = getelementptr %struct.a, <8 x %struct.a*> %2, i32 0, <8 x i32> <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3><br>
+ %10 = call <8 x i32> @llvm.masked.gather.v8i32.v8p0i32(<8 x i32*> %9, i32 4, <8 x i1> %4, <8 x i32> undef)<br>
+ %11 = add <8 x i32> %8, %10<br>
+ ret <8 x i32> %11<br>
+}<br>
+<br>
declare <2 x double> @llvm.masked.gather.v2f64.v2p0f64(<2 x double*>, i32, <2 x i1>, <2 x double>)<br>
declare <4 x double> @llvm.masked.gather.v4f64.v4p0f64(<4 x double*>, i32, <4 x i1>, <4 x double>)<br>
<br>
@@ -1023,3 +1756,5 @@ declare <4 x float> @llvm.masked.gather.v4f32.v4p0f32(<4 x float*>, i32, <4 x i1<br>
declare <8 x float> @llvm.masked.gather.v8f32.v8p0f32(<8 x float*>, i32, <8 x i1>, <8 x float>)<br>
<br>
declare <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*>, i32, <16 x i1>, <16 x i8>)<br>
+<br>
+declare <8 x i32> @llvm.masked.gather.v8i32.v8p0i32(<8 x i32*>, i32, <8 x i1>, <8 x i32>)<br>
<br>
<br>
<br>
_______________________________________________<br>
llvm-commits mailing list<br>
<a href="mailto:llvm-commits@lists.llvm.org" target="_blank">llvm-commits@lists.llvm.org</a><br>
<a href="https://lists.llvm.org/cgi-bin/mailman/listinfo/llvm-commits" rel="noreferrer" target="_blank">https://lists.llvm.org/cgi-bin/mailman/listinfo/llvm-commits</a><br>