[llvm] r337144 - [x86/SLH] Teach speculative load hardening to correctly harden the
Chandler Carruth via llvm-commits
llvm-commits at lists.llvm.org
Sun Jul 15 21:17:51 PDT 2018
Author: chandlerc
Date: Sun Jul 15 21:17:51 2018
New Revision: 337144
URL: http://llvm.org/viewvc/llvm-project?rev=337144&view=rev
Log:
[x86/SLH] Teach speculative load hardening to correctly harden the
indices used by AVX2 and AVX-512 gather instructions.

The index vector is hardened by broadcasting the predicate state into
a vector register and then or-ing it into the index. We don't even
have to worry about EFLAGS here.
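
For the AVX2 cases this ends up looking like the following (registers
as in the new test below):

  vmovq        %rax, %xmm3          # move predicate state into a vector reg
  vpbroadcastq %xmm3, %xmm3         # splat it across every index lane
  vpor         %xmm0, %xmm3, %xmm0  # state of 0 is a no-op; all-ones
                                    # poisons every index lane

With AVX-512 we can broadcast the state directly from the GPR
(`vpbroadcastq %rax, %zmm2`) and skip the `vmovq`.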

I've added a test for all of the gather intrinsics to make sure that we
don't miss one. A particularly interesting case is the gather prefetch,
which needs to be marked as potentially "loading" to get the correct
behavior: it is a memory access in many ways, and is actually relevant
for SLH. Based on discussion with Craig in review, I've moved it to
`mayLoad` and `mayStore` rather than generic side effects. This matches
how we model other prefetch instructions.
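
Under SLH the prefetch's index then gets the same hardening as a real
gather; from the new test:

  vpbroadcastq %rax, %zmm1
  vporq        %zmm0, %zmm1, %zmm0
  vgatherpf0qps (%rdi,%zmm0,4) {%k1}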

Many thanks to Craig for the review here.

Differential Revision: https://reviews.llvm.org/D49336

Added:
    llvm/trunk/test/CodeGen/X86/speculative-load-hardening-gather.ll

Modified:
    llvm/trunk/lib/Target/X86/X86InstrAVX512.td
    llvm/trunk/lib/Target/X86/X86SpeculativeLoadHardening.cpp

Modified: llvm/trunk/lib/Target/X86/X86InstrAVX512.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrAVX512.td?rev=337144&r1=337143&r2=337144&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrAVX512.td (original)
+++ llvm/trunk/lib/Target/X86/X86InstrAVX512.td Sun Jul 15 21:17:51 2018
@@ -9588,7 +9588,7 @@ defm VPSCATTER : avx512_scatter_q_pd<0xA
// prefetch
multiclass avx512_gather_scatter_prefetch<bits<8> opc, Format F, string OpcodeStr,
RegisterClass KRC, X86MemOperand memop> {
- let Predicates = [HasPFI], hasSideEffects = 1 in
+ let Predicates = [HasPFI], mayLoad = 1, mayStore = 1 in
def m : AVX5128I<opc, F, (outs), (ins KRC:$mask, memop:$src),
!strconcat(OpcodeStr, "\t{$src {${mask}}|{${mask}}, $src}"), []>,
EVEX, EVEX_K, Sched<[WriteLoad]>;

Modified: llvm/trunk/lib/Target/X86/X86SpeculativeLoadHardening.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86SpeculativeLoadHardening.cpp?rev=337144&r1=337143&r2=337144&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86SpeculativeLoadHardening.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86SpeculativeLoadHardening.cpp Sun Jul 15 21:17:51 2018
@@ -1398,29 +1398,104 @@ void X86SpeculativeLoadHardeningPass::ha
}
for (MachineOperand *Op : HardenOpRegs) {
- auto *OpRC = MRI->getRegClass(Op->getReg());
-
unsigned OpReg = Op->getReg();
+ auto *OpRC = MRI->getRegClass(OpReg);
unsigned TmpReg = MRI->createVirtualRegister(OpRC);
- if (!EFLAGSLive) {
- // Merge our potential poison state into the value with an or.
- auto OrI = BuildMI(MBB, InsertPt, Loc, TII->get(X86::OR64rr), TmpReg)
- .addReg(StateReg)
- .addReg(OpReg);
- OrI->addRegisterDead(X86::EFLAGS, TRI);
+ // If this is a vector register, we'll need somewhat custom logic to handle
+ // hardening it.
+ if (!Subtarget->hasVLX() && (OpRC->hasSuperClassEq(&X86::VR128RegClass) ||
+ OpRC->hasSuperClassEq(&X86::VR256RegClass))) {
+ assert(Subtarget->hasAVX2() && "AVX2-specific register classes!");
+ bool Is128Bit = OpRC->hasSuperClassEq(&X86::VR128RegClass);
+
+ // Move our state into a vector register.
+ // FIXME: We could skip this at the cost of longer encodings with AVX-512
+ // but that doesn't seem likely worth it.
+ unsigned VStateReg = MRI->createVirtualRegister(&X86::VR128RegClass);
+ auto MovI =
+ BuildMI(MBB, InsertPt, Loc, TII->get(X86::VMOV64toPQIrr), VStateReg)
+ .addReg(StateReg);
+ (void)MovI;
+ ++NumInstsInserted;
+ LLVM_DEBUG(dbgs() << " Inserting mov: "; MovI->dump(); dbgs() << "\n");
+
+ // Broadcast it across the vector register.
+ unsigned VBStateReg = MRI->createVirtualRegister(OpRC);
+ auto BroadcastI = BuildMI(MBB, InsertPt, Loc,
+ TII->get(Is128Bit ? X86::VPBROADCASTQrr
+ : X86::VPBROADCASTQYrr),
+ VBStateReg)
+ .addReg(VStateReg);
+ (void)BroadcastI;
+ ++NumInstsInserted;
+ LLVM_DEBUG(dbgs() << " Inserting broadcast: "; BroadcastI->dump();
+ dbgs() << "\n");
+
+ // Merge our potential poison state into the value with a vector or.
+ auto OrI =
+ BuildMI(MBB, InsertPt, Loc,
+ TII->get(Is128Bit ? X86::VPORrr : X86::VPORYrr), TmpReg)
+ .addReg(VBStateReg)
+ .addReg(OpReg);
+ (void)OrI;
++NumInstsInserted;
LLVM_DEBUG(dbgs() << " Inserting or: "; OrI->dump(); dbgs() << "\n");
- } else {
- // We need to avoid touching EFLAGS so shift out all but the least
- // significant bit using the instruction that doesn't update flags.
- auto ShiftI = BuildMI(MBB, InsertPt, Loc, TII->get(X86::SHRX64rr), TmpReg)
- .addReg(OpReg)
- .addReg(StateReg);
- (void)ShiftI;
+ } else if (OpRC->hasSuperClassEq(&X86::VR128XRegClass) ||
+ OpRC->hasSuperClassEq(&X86::VR256XRegClass) ||
+ OpRC->hasSuperClassEq(&X86::VR512RegClass)) {
+ assert(Subtarget->hasAVX512() && "AVX512-specific register classes!");
+ bool Is128Bit = OpRC->hasSuperClassEq(&X86::VR128XRegClass);
+ bool Is256Bit = OpRC->hasSuperClassEq(&X86::VR256XRegClass);
+ if (Is128Bit || Is256Bit)
+ assert(Subtarget->hasVLX() && "AVX512VL-specific register classes!");
+
+ // Broadcast our state into a vector register.
+ unsigned VStateReg = MRI->createVirtualRegister(OpRC);
+ unsigned BroadcastOp =
+ Is128Bit ? X86::VPBROADCASTQrZ128r
+ : Is256Bit ? X86::VPBROADCASTQrZ256r : X86::VPBROADCASTQrZr;
+ auto BroadcastI =
+ BuildMI(MBB, InsertPt, Loc, TII->get(BroadcastOp), VStateReg)
+ .addReg(StateReg);
+ (void)BroadcastI;
++NumInstsInserted;
- LLVM_DEBUG(dbgs() << " Inserting shrx: "; ShiftI->dump();
+ LLVM_DEBUG(dbgs() << " Inserting broadcast: "; BroadcastI->dump();
dbgs() << "\n");
+
+ // Merge our potential poison state into the value with a vector or.
+ unsigned OrOp = Is128Bit ? X86::VPORQZ128rr
+ : Is256Bit ? X86::VPORQZ256rr : X86::VPORQZrr;
+ auto OrI = BuildMI(MBB, InsertPt, Loc, TII->get(OrOp), TmpReg)
+ .addReg(VStateReg)
+ .addReg(OpReg);
+ ++NumInstsInserted;
+ LLVM_DEBUG(dbgs() << " Inserting or: "; OrI->dump(); dbgs() << "\n");
+ } else {
+ // FIXME: Need to support GR32 here for 32-bit code.
+ assert(OpRC->hasSuperClassEq(&X86::GR64RegClass) &&
+ "Not a supported register class for address hardening!");
+
+ if (!EFLAGSLive) {
+ // Merge our potential poison state into the value with an or.
+ auto OrI = BuildMI(MBB, InsertPt, Loc, TII->get(X86::OR64rr), TmpReg)
+ .addReg(StateReg)
+ .addReg(OpReg);
+ OrI->addRegisterDead(X86::EFLAGS, TRI);
+ ++NumInstsInserted;
+ LLVM_DEBUG(dbgs() << " Inserting or: "; OrI->dump(); dbgs() << "\n");
+ } else {
+ // We need to avoid touching EFLAGS so shift out all but the least
+ // significant bit using the instruction that doesn't update flags.
+ auto ShiftI =
+ BuildMI(MBB, InsertPt, Loc, TII->get(X86::SHRX64rr), TmpReg)
+ .addReg(OpReg)
+ .addReg(StateReg);
+ (void)ShiftI;
+ ++NumInstsInserted;
+ LLVM_DEBUG(dbgs() << " Inserting shrx: "; ShiftI->dump();
+ dbgs() << "\n");
+ }
}
// Record this register as checked and update the operand.
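
For reference, the pre-existing scalar fallback kept above hardens a
GR64 address with `shrx` when EFLAGS is live (illustrative registers,
not from a test):

  shrxq %rax, %rdi, %rdi   # shift count comes from the state register

When the state is 0 the count is 0 and the address is untouched; when
it is all-ones the count is masked to 63, shifting out every bit of
the address except its original top bit.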

Added: llvm/trunk/test/CodeGen/X86/speculative-load-hardening-gather.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/speculative-load-hardening-gather.ll?rev=337144&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/speculative-load-hardening-gather.ll (added)
+++ llvm/trunk/test/CodeGen/X86/speculative-load-hardening-gather.ll Sun Jul 15 21:17:51 2018
@@ -0,0 +1,955 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -x86-speculative-load-hardening | FileCheck %s
+
+declare <4 x float> @llvm.x86.avx2.gather.d.ps(<4 x float>, i8*, <4 x i32>, <4 x float>, i8)
+
+define <4 x float> @test_llvm_x86_avx2_gather_d_ps(i8* %b, <4 x i32> %iv, <4 x float> %mask) #0 {
+; CHECK-LABEL: test_llvm_x86_avx2_gather_d_ps:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rsp, %rax
+; CHECK-NEXT: movq $-1, %rcx
+; CHECK-NEXT: sarq $63, %rax
+; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; CHECK-NEXT: orq %rax, %rdi
+; CHECK-NEXT: vmovq %rax, %xmm3
+; CHECK-NEXT: vpbroadcastq %xmm3, %xmm3
+; CHECK-NEXT: vpor %xmm0, %xmm3, %xmm0
+; CHECK-NEXT: vgatherdps %xmm1, (%rdi,%xmm0), %xmm2
+; CHECK-NEXT: shlq $47, %rax
+; CHECK-NEXT: vmovaps %xmm2, %xmm0
+; CHECK-NEXT: orq %rax, %rsp
+; CHECK-NEXT: retq
+entry:
+ %v = call <4 x float> @llvm.x86.avx2.gather.d.ps(<4 x float> zeroinitializer, i8* %b, <4 x i32> %iv, <4 x float> %mask, i8 1)
+ ret <4 x float> %v
+}
+
+declare <4 x float> @llvm.x86.avx2.gather.q.ps(<4 x float>, i8*, <2 x i64>, <4 x float>, i8)
+
+define <4 x float> @test_llvm_x86_avx2_gather_q_ps(i8* %b, <2 x i64> %iv, <4 x float> %mask) #0 {
+; CHECK-LABEL: test_llvm_x86_avx2_gather_q_ps:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rsp, %rax
+; CHECK-NEXT: movq $-1, %rcx
+; CHECK-NEXT: sarq $63, %rax
+; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; CHECK-NEXT: orq %rax, %rdi
+; CHECK-NEXT: vmovq %rax, %xmm3
+; CHECK-NEXT: vpbroadcastq %xmm3, %xmm3
+; CHECK-NEXT: vpor %xmm0, %xmm3, %xmm0
+; CHECK-NEXT: vgatherqps %xmm1, (%rdi,%xmm0), %xmm2
+; CHECK-NEXT: shlq $47, %rax
+; CHECK-NEXT: vmovaps %xmm2, %xmm0
+; CHECK-NEXT: orq %rax, %rsp
+; CHECK-NEXT: retq
+entry:
+ %v = call <4 x float> @llvm.x86.avx2.gather.q.ps(<4 x float> zeroinitializer, i8* %b, <2 x i64> %iv, <4 x float> %mask, i8 1)
+ ret <4 x float> %v
+}
+
+declare <2 x double> @llvm.x86.avx2.gather.d.pd(<2 x double>, i8*, <4 x i32>, <2 x double>, i8)
+
+define <2 x double> @test_llvm_x86_avx2_gather_d_pd(i8* %b, <4 x i32> %iv, <2 x double> %mask) #0 {
+; CHECK-LABEL: test_llvm_x86_avx2_gather_d_pd:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rsp, %rax
+; CHECK-NEXT: movq $-1, %rcx
+; CHECK-NEXT: sarq $63, %rax
+; CHECK-NEXT: vxorpd %xmm2, %xmm2, %xmm2
+; CHECK-NEXT: orq %rax, %rdi
+; CHECK-NEXT: vmovq %rax, %xmm3
+; CHECK-NEXT: vpbroadcastq %xmm3, %xmm3
+; CHECK-NEXT: vpor %xmm0, %xmm3, %xmm0
+; CHECK-NEXT: vgatherdpd %xmm1, (%rdi,%xmm0), %xmm2
+; CHECK-NEXT: shlq $47, %rax
+; CHECK-NEXT: vmovapd %xmm2, %xmm0
+; CHECK-NEXT: orq %rax, %rsp
+; CHECK-NEXT: retq
+entry:
+ %v = call <2 x double> @llvm.x86.avx2.gather.d.pd(<2 x double> zeroinitializer, i8* %b, <4 x i32> %iv, <2 x double> %mask, i8 1)
+ ret <2 x double> %v
+}
+
+declare <2 x double> @llvm.x86.avx2.gather.q.pd(<2 x double>, i8*, <2 x i64>, <2 x double>, i8)
+
+define <2 x double> @test_llvm_x86_avx2_gather_q_pd(i8* %b, <2 x i64> %iv, <2 x double> %mask) #0 {
+; CHECK-LABEL: test_llvm_x86_avx2_gather_q_pd:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rsp, %rax
+; CHECK-NEXT: movq $-1, %rcx
+; CHECK-NEXT: sarq $63, %rax
+; CHECK-NEXT: vxorpd %xmm2, %xmm2, %xmm2
+; CHECK-NEXT: orq %rax, %rdi
+; CHECK-NEXT: vmovq %rax, %xmm3
+; CHECK-NEXT: vpbroadcastq %xmm3, %xmm3
+; CHECK-NEXT: vpor %xmm0, %xmm3, %xmm0
+; CHECK-NEXT: vgatherqpd %xmm1, (%rdi,%xmm0), %xmm2
+; CHECK-NEXT: shlq $47, %rax
+; CHECK-NEXT: vmovapd %xmm2, %xmm0
+; CHECK-NEXT: orq %rax, %rsp
+; CHECK-NEXT: retq
+entry:
+ %v = call <2 x double> @llvm.x86.avx2.gather.q.pd(<2 x double> zeroinitializer, i8* %b, <2 x i64> %iv, <2 x double> %mask, i8 1)
+ ret <2 x double> %v
+}
+
+declare <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float>, i8*, <8 x i32>, <8 x float>, i8)
+
+define <8 x float> @test_llvm_x86_avx2_gather_d_ps_256(i8* %b, <8 x i32> %iv, <8 x float> %mask) #0 {
+; CHECK-LABEL: test_llvm_x86_avx2_gather_d_ps_256:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rsp, %rax
+; CHECK-NEXT: movq $-1, %rcx
+; CHECK-NEXT: sarq $63, %rax
+; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; CHECK-NEXT: orq %rax, %rdi
+; CHECK-NEXT: vmovq %rax, %xmm3
+; CHECK-NEXT: vpbroadcastq %xmm3, %ymm3
+; CHECK-NEXT: vpor %ymm0, %ymm3, %ymm0
+; CHECK-NEXT: vgatherdps %ymm1, (%rdi,%ymm0), %ymm2
+; CHECK-NEXT: shlq $47, %rax
+; CHECK-NEXT: vmovaps %ymm2, %ymm0
+; CHECK-NEXT: orq %rax, %rsp
+; CHECK-NEXT: retq
+entry:
+ %v = call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> zeroinitializer, i8* %b, <8 x i32> %iv, <8 x float> %mask, i8 1)
+ ret <8 x float> %v
+}
+
+declare <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float>, i8*, <4 x i64>, <4 x float>, i8)
+
+define <4 x float> @test_llvm_x86_avx2_gather_q_ps_256(i8* %b, <4 x i64> %iv, <4 x float> %mask) #0 {
+; CHECK-LABEL: test_llvm_x86_avx2_gather_q_ps_256:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rsp, %rax
+; CHECK-NEXT: movq $-1, %rcx
+; CHECK-NEXT: sarq $63, %rax
+; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; CHECK-NEXT: orq %rax, %rdi
+; CHECK-NEXT: vmovq %rax, %xmm3
+; CHECK-NEXT: vpbroadcastq %xmm3, %ymm3
+; CHECK-NEXT: vpor %ymm0, %ymm3, %ymm0
+; CHECK-NEXT: vgatherqps %xmm1, (%rdi,%ymm0), %xmm2
+; CHECK-NEXT: shlq $47, %rax
+; CHECK-NEXT: vmovaps %xmm2, %xmm0
+; CHECK-NEXT: orq %rax, %rsp
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %v = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> zeroinitializer, i8* %b, <4 x i64> %iv, <4 x float> %mask, i8 1)
+ ret <4 x float> %v
+}
+
+declare <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double>, i8*, <4 x i32>, <4 x double>, i8)
+
+define <4 x double> @test_llvm_x86_avx2_gather_d_pd_256(i8* %b, <4 x i32> %iv, <4 x double> %mask) #0 {
+; CHECK-LABEL: test_llvm_x86_avx2_gather_d_pd_256:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rsp, %rax
+; CHECK-NEXT: movq $-1, %rcx
+; CHECK-NEXT: sarq $63, %rax
+; CHECK-NEXT: vxorpd %xmm2, %xmm2, %xmm2
+; CHECK-NEXT: orq %rax, %rdi
+; CHECK-NEXT: vmovq %rax, %xmm3
+; CHECK-NEXT: vpbroadcastq %xmm3, %xmm3
+; CHECK-NEXT: vpor %xmm0, %xmm3, %xmm0
+; CHECK-NEXT: vgatherdpd %ymm1, (%rdi,%xmm0), %ymm2
+; CHECK-NEXT: shlq $47, %rax
+; CHECK-NEXT: vmovapd %ymm2, %ymm0
+; CHECK-NEXT: orq %rax, %rsp
+; CHECK-NEXT: retq
+entry:
+ %v = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> zeroinitializer, i8* %b, <4 x i32> %iv, <4 x double> %mask, i8 1)
+ ret <4 x double> %v
+}
+
+declare <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double>, i8*, <4 x i64>, <4 x double>, i8)
+
+define <4 x double> @test_llvm_x86_avx2_gather_q_pd_256(i8* %b, <4 x i64> %iv, <4 x double> %mask) #0 {
+; CHECK-LABEL: test_llvm_x86_avx2_gather_q_pd_256:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rsp, %rax
+; CHECK-NEXT: movq $-1, %rcx
+; CHECK-NEXT: sarq $63, %rax
+; CHECK-NEXT: vxorpd %xmm2, %xmm2, %xmm2
+; CHECK-NEXT: orq %rax, %rdi
+; CHECK-NEXT: vmovq %rax, %xmm3
+; CHECK-NEXT: vpbroadcastq %xmm3, %ymm3
+; CHECK-NEXT: vpor %ymm0, %ymm3, %ymm0
+; CHECK-NEXT: vgatherqpd %ymm1, (%rdi,%ymm0), %ymm2
+; CHECK-NEXT: shlq $47, %rax
+; CHECK-NEXT: vmovapd %ymm2, %ymm0
+; CHECK-NEXT: orq %rax, %rsp
+; CHECK-NEXT: retq
+entry:
+ %v = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> zeroinitializer, i8* %b, <4 x i64> %iv, <4 x double> %mask, i8 1)
+ ret <4 x double> %v
+}
+
+declare <4 x i32> @llvm.x86.avx2.gather.d.d(<4 x i32>, i8*, <4 x i32>, <4 x i32>, i8)
+
+define <4 x i32> @test_llvm_x86_avx2_gather_d_d(i8* %b, <4 x i32> %iv, <4 x i32> %mask) #0 {
+; CHECK-LABEL: test_llvm_x86_avx2_gather_d_d:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rsp, %rax
+; CHECK-NEXT: movq $-1, %rcx
+; CHECK-NEXT: sarq $63, %rax
+; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; CHECK-NEXT: orq %rax, %rdi
+; CHECK-NEXT: vmovq %rax, %xmm3
+; CHECK-NEXT: vpbroadcastq %xmm3, %xmm3
+; CHECK-NEXT: vpor %xmm0, %xmm3, %xmm0
+; CHECK-NEXT: vpgatherdd %xmm1, (%rdi,%xmm0), %xmm2
+; CHECK-NEXT: shlq $47, %rax
+; CHECK-NEXT: vmovdqa %xmm2, %xmm0
+; CHECK-NEXT: orq %rax, %rsp
+; CHECK-NEXT: retq
+entry:
+ %v = call <4 x i32> @llvm.x86.avx2.gather.d.d(<4 x i32> zeroinitializer, i8* %b, <4 x i32> %iv, <4 x i32> %mask, i8 1)
+ ret <4 x i32> %v
+}
+
+declare <4 x i32> @llvm.x86.avx2.gather.q.d(<4 x i32>, i8*, <2 x i64>, <4 x i32>, i8)
+
+define <4 x i32> @test_llvm_x86_avx2_gather_q_d(i8* %b, <2 x i64> %iv, <4 x i32> %mask) #0 {
+; CHECK-LABEL: test_llvm_x86_avx2_gather_q_d:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rsp, %rax
+; CHECK-NEXT: movq $-1, %rcx
+; CHECK-NEXT: sarq $63, %rax
+; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; CHECK-NEXT: orq %rax, %rdi
+; CHECK-NEXT: vmovq %rax, %xmm3
+; CHECK-NEXT: vpbroadcastq %xmm3, %xmm3
+; CHECK-NEXT: vpor %xmm0, %xmm3, %xmm0
+; CHECK-NEXT: vpgatherqd %xmm1, (%rdi,%xmm0), %xmm2
+; CHECK-NEXT: shlq $47, %rax
+; CHECK-NEXT: vmovdqa %xmm2, %xmm0
+; CHECK-NEXT: orq %rax, %rsp
+; CHECK-NEXT: retq
+entry:
+ %v = call <4 x i32> @llvm.x86.avx2.gather.q.d(<4 x i32> zeroinitializer, i8* %b, <2 x i64> %iv, <4 x i32> %mask, i8 1)
+ ret <4 x i32> %v
+}
+
+declare <2 x i64> @llvm.x86.avx2.gather.d.q(<2 x i64>, i8*, <4 x i32>, <2 x i64>, i8)
+
+define <2 x i64> @test_llvm_x86_avx2_gather_d_q(i8* %b, <4 x i32> %iv, <2 x i64> %mask) #0 {
+; CHECK-LABEL: test_llvm_x86_avx2_gather_d_q:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rsp, %rax
+; CHECK-NEXT: movq $-1, %rcx
+; CHECK-NEXT: sarq $63, %rax
+; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; CHECK-NEXT: orq %rax, %rdi
+; CHECK-NEXT: vmovq %rax, %xmm3
+; CHECK-NEXT: vpbroadcastq %xmm3, %xmm3
+; CHECK-NEXT: vpor %xmm0, %xmm3, %xmm0
+; CHECK-NEXT: vpgatherdq %xmm1, (%rdi,%xmm0), %xmm2
+; CHECK-NEXT: shlq $47, %rax
+; CHECK-NEXT: vmovdqa %xmm2, %xmm0
+; CHECK-NEXT: orq %rax, %rsp
+; CHECK-NEXT: retq
+entry:
+ %v = call <2 x i64> @llvm.x86.avx2.gather.d.q(<2 x i64> zeroinitializer, i8* %b, <4 x i32> %iv, <2 x i64> %mask, i8 1)
+ ret <2 x i64> %v
+}
+
+declare <2 x i64> @llvm.x86.avx2.gather.q.q(<2 x i64>, i8*, <2 x i64>, <2 x i64>, i8)
+
+define <2 x i64> @test_llvm_x86_avx2_gather_q_q(i8* %b, <2 x i64> %iv, <2 x i64> %mask) #0 {
+; CHECK-LABEL: test_llvm_x86_avx2_gather_q_q:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rsp, %rax
+; CHECK-NEXT: movq $-1, %rcx
+; CHECK-NEXT: sarq $63, %rax
+; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; CHECK-NEXT: orq %rax, %rdi
+; CHECK-NEXT: vmovq %rax, %xmm3
+; CHECK-NEXT: vpbroadcastq %xmm3, %xmm3
+; CHECK-NEXT: vpor %xmm0, %xmm3, %xmm0
+; CHECK-NEXT: vpgatherqq %xmm1, (%rdi,%xmm0), %xmm2
+; CHECK-NEXT: shlq $47, %rax
+; CHECK-NEXT: vmovdqa %xmm2, %xmm0
+; CHECK-NEXT: orq %rax, %rsp
+; CHECK-NEXT: retq
+entry:
+ %v = call <2 x i64> @llvm.x86.avx2.gather.q.q(<2 x i64> zeroinitializer, i8* %b, <2 x i64> %iv, <2 x i64> %mask, i8 1)
+ ret <2 x i64> %v
+}
+
+declare <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32>, i8*, <8 x i32>, <8 x i32>, i8)
+
+define <8 x i32> @test_llvm_x86_avx2_gather_d_d_256(i8* %b, <8 x i32> %iv, <8 x i32> %mask) #0 {
+; CHECK-LABEL: test_llvm_x86_avx2_gather_d_d_256:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rsp, %rax
+; CHECK-NEXT: movq $-1, %rcx
+; CHECK-NEXT: sarq $63, %rax
+; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; CHECK-NEXT: orq %rax, %rdi
+; CHECK-NEXT: vmovq %rax, %xmm3
+; CHECK-NEXT: vpbroadcastq %xmm3, %ymm3
+; CHECK-NEXT: vpor %ymm0, %ymm3, %ymm0
+; CHECK-NEXT: vpgatherdd %ymm1, (%rdi,%ymm0), %ymm2
+; CHECK-NEXT: shlq $47, %rax
+; CHECK-NEXT: vmovdqa %ymm2, %ymm0
+; CHECK-NEXT: orq %rax, %rsp
+; CHECK-NEXT: retq
+entry:
+ %v = call <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32> zeroinitializer, i8* %b, <8 x i32> %iv, <8 x i32> %mask, i8 1)
+ ret <8 x i32> %v
+}
+
+declare <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32>, i8*, <4 x i64>, <4 x i32>, i8)
+
+define <4 x i32> @test_llvm_x86_avx2_gather_q_d_256(i8* %b, <4 x i64> %iv, <4 x i32> %mask) #0 {
+; CHECK-LABEL: test_llvm_x86_avx2_gather_q_d_256:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rsp, %rax
+; CHECK-NEXT: movq $-1, %rcx
+; CHECK-NEXT: sarq $63, %rax
+; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; CHECK-NEXT: orq %rax, %rdi
+; CHECK-NEXT: vmovq %rax, %xmm3
+; CHECK-NEXT: vpbroadcastq %xmm3, %ymm3
+; CHECK-NEXT: vpor %ymm0, %ymm3, %ymm0
+; CHECK-NEXT: vpgatherqd %xmm1, (%rdi,%ymm0), %xmm2
+; CHECK-NEXT: shlq $47, %rax
+; CHECK-NEXT: vmovdqa %xmm2, %xmm0
+; CHECK-NEXT: orq %rax, %rsp
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %v = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> zeroinitializer, i8* %b, <4 x i64> %iv, <4 x i32> %mask, i8 1)
+ ret <4 x i32> %v
+}
+
+declare <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64>, i8*, <4 x i32>, <4 x i64>, i8)
+
+define <4 x i64> @test_llvm_x86_avx2_gather_d_q_256(i8* %b, <4 x i32> %iv, <4 x i64> %mask) #0 {
+; CHECK-LABEL: test_llvm_x86_avx2_gather_d_q_256:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rsp, %rax
+; CHECK-NEXT: movq $-1, %rcx
+; CHECK-NEXT: sarq $63, %rax
+; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; CHECK-NEXT: orq %rax, %rdi
+; CHECK-NEXT: vmovq %rax, %xmm3
+; CHECK-NEXT: vpbroadcastq %xmm3, %xmm3
+; CHECK-NEXT: vpor %xmm0, %xmm3, %xmm0
+; CHECK-NEXT: vpgatherdq %ymm1, (%rdi,%xmm0), %ymm2
+; CHECK-NEXT: shlq $47, %rax
+; CHECK-NEXT: vmovdqa %ymm2, %ymm0
+; CHECK-NEXT: orq %rax, %rsp
+; CHECK-NEXT: retq
+entry:
+ %v = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> zeroinitializer, i8* %b, <4 x i32> %iv, <4 x i64> %mask, i8 1)
+ ret <4 x i64> %v
+}
+
+declare <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64>, i8*, <4 x i64>, <4 x i64>, i8)
+
+define <4 x i64> @test_llvm_x86_avx2_gather_q_q_256(i8* %b, <4 x i64> %iv, <4 x i64> %mask) #0 {
+; CHECK-LABEL: test_llvm_x86_avx2_gather_q_q_256:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rsp, %rax
+; CHECK-NEXT: movq $-1, %rcx
+; CHECK-NEXT: sarq $63, %rax
+; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; CHECK-NEXT: orq %rax, %rdi
+; CHECK-NEXT: vmovq %rax, %xmm3
+; CHECK-NEXT: vpbroadcastq %xmm3, %ymm3
+; CHECK-NEXT: vpor %ymm0, %ymm3, %ymm0
+; CHECK-NEXT: vpgatherqq %ymm1, (%rdi,%ymm0), %ymm2
+; CHECK-NEXT: shlq $47, %rax
+; CHECK-NEXT: vmovdqa %ymm2, %ymm0
+; CHECK-NEXT: orq %rax, %rsp
+; CHECK-NEXT: retq
+entry:
+ %v = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> zeroinitializer, i8* %b, <4 x i64> %iv, <4 x i64> %mask, i8 1)
+ ret <4 x i64> %v
+}
+
+declare <16 x float> @llvm.x86.avx512.gather.dps.512(<16 x float>, i8*, <16 x i32>, i16, i32)
+
+define <16 x float> @test_llvm_x86_avx512_gather_dps_512(i8* %b, <16 x i32> %iv) #1 {
+; CHECK-LABEL: test_llvm_x86_avx512_gather_dps_512:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rsp, %rax
+; CHECK-NEXT: movq $-1, %rcx
+; CHECK-NEXT: sarq $63, %rax
+; CHECK-NEXT: kxnorw %k0, %k0, %k1
+; CHECK-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; CHECK-NEXT: orq %rax, %rdi
+; CHECK-NEXT: vpbroadcastq %rax, %zmm2
+; CHECK-NEXT: vporq %zmm0, %zmm2, %zmm0
+; CHECK-NEXT: vgatherdps (%rdi,%zmm0), %zmm1 {%k1}
+; CHECK-NEXT: shlq $47, %rax
+; CHECK-NEXT: vmovaps %zmm1, %zmm0
+; CHECK-NEXT: orq %rax, %rsp
+; CHECK-NEXT: retq
+entry:
+ %v = call <16 x float> @llvm.x86.avx512.gather.dps.512(<16 x float> zeroinitializer, i8* %b, <16 x i32> %iv, i16 -1, i32 1)
+ ret <16 x float> %v
+}
+
+declare <8 x double> @llvm.x86.avx512.gather.dpd.512(<8 x double>, i8*, <8 x i32>, i8, i32)
+
+define <8 x double> @test_llvm_x86_avx512_gather_dpd_512(i8* %b, <8 x i32> %iv) #1 {
+; CHECK-LABEL: test_llvm_x86_avx512_gather_dpd_512:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rsp, %rax
+; CHECK-NEXT: movq $-1, %rcx
+; CHECK-NEXT: sarq $63, %rax
+; CHECK-NEXT: kxnorw %k0, %k0, %k1
+; CHECK-NEXT: vxorpd %xmm1, %xmm1, %xmm1
+; CHECK-NEXT: orq %rax, %rdi
+; CHECK-NEXT: vmovq %rax, %xmm2
+; CHECK-NEXT: vpbroadcastq %xmm2, %ymm2
+; CHECK-NEXT: vpor %ymm0, %ymm2, %ymm0
+; CHECK-NEXT: vgatherdpd (%rdi,%ymm0), %zmm1 {%k1}
+; CHECK-NEXT: shlq $47, %rax
+; CHECK-NEXT: vmovapd %zmm1, %zmm0
+; CHECK-NEXT: orq %rax, %rsp
+; CHECK-NEXT: retq
+entry:
+ %v = call <8 x double> @llvm.x86.avx512.gather.dpd.512(<8 x double> zeroinitializer, i8* %b, <8 x i32> %iv, i8 -1, i32 1)
+ ret <8 x double> %v
+}
+
+declare <8 x float> @llvm.x86.avx512.gather.qps.512(<8 x float>, i8*, <8 x i64>, i8, i32)
+
+define <8 x float> @test_llvm_x86_avx512_gather_qps_512(i8* %b, <8 x i64> %iv) #1 {
+; CHECK-LABEL: test_llvm_x86_avx512_gather_qps_512:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rsp, %rax
+; CHECK-NEXT: movq $-1, %rcx
+; CHECK-NEXT: sarq $63, %rax
+; CHECK-NEXT: kxnorw %k0, %k0, %k1
+; CHECK-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; CHECK-NEXT: orq %rax, %rdi
+; CHECK-NEXT: vpbroadcastq %rax, %zmm2
+; CHECK-NEXT: vporq %zmm0, %zmm2, %zmm0
+; CHECK-NEXT: vgatherqps (%rdi,%zmm0), %ymm1 {%k1}
+; CHECK-NEXT: shlq $47, %rax
+; CHECK-NEXT: vmovaps %ymm1, %ymm0
+; CHECK-NEXT: orq %rax, %rsp
+; CHECK-NEXT: retq
+entry:
+ %v = call <8 x float> @llvm.x86.avx512.gather.qps.512(<8 x float> zeroinitializer, i8* %b, <8 x i64> %iv, i8 -1, i32 1)
+ ret <8 x float> %v
+}
+
+declare <8 x double> @llvm.x86.avx512.gather.qpd.512(<8 x double>, i8*, <8 x i64>, i8, i32)
+
+define <8 x double> @test_llvm_x86_avx512_gather_qpd_512(i8* %b, <8 x i64> %iv) #1 {
+; CHECK-LABEL: test_llvm_x86_avx512_gather_qpd_512:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rsp, %rax
+; CHECK-NEXT: movq $-1, %rcx
+; CHECK-NEXT: sarq $63, %rax
+; CHECK-NEXT: kxnorw %k0, %k0, %k1
+; CHECK-NEXT: vxorpd %xmm1, %xmm1, %xmm1
+; CHECK-NEXT: orq %rax, %rdi
+; CHECK-NEXT: vpbroadcastq %rax, %zmm2
+; CHECK-NEXT: vporq %zmm0, %zmm2, %zmm0
+; CHECK-NEXT: vgatherqpd (%rdi,%zmm0), %zmm1 {%k1}
+; CHECK-NEXT: shlq $47, %rax
+; CHECK-NEXT: vmovapd %zmm1, %zmm0
+; CHECK-NEXT: orq %rax, %rsp
+; CHECK-NEXT: retq
+entry:
+ %v = call <8 x double> @llvm.x86.avx512.gather.qpd.512(<8 x double> zeroinitializer, i8* %b, <8 x i64> %iv, i8 -1, i32 1)
+ ret <8 x double> %v
+}
+
+declare <16 x i32> @llvm.x86.avx512.gather.dpi.512(<16 x i32>, i8*, <16 x i32>, i16, i32)
+
+define <16 x i32> @test_llvm_x86_avx512_gather_dpi_512(i8* %b, <16 x i32> %iv) #1 {
+; CHECK-LABEL: test_llvm_x86_avx512_gather_dpi_512:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rsp, %rax
+; CHECK-NEXT: movq $-1, %rcx
+; CHECK-NEXT: sarq $63, %rax
+; CHECK-NEXT: kxnorw %k0, %k0, %k1
+; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; CHECK-NEXT: orq %rax, %rdi
+; CHECK-NEXT: vpbroadcastq %rax, %zmm2
+; CHECK-NEXT: vporq %zmm0, %zmm2, %zmm0
+; CHECK-NEXT: vpgatherdd (%rdi,%zmm0), %zmm1 {%k1}
+; CHECK-NEXT: shlq $47, %rax
+; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: orq %rax, %rsp
+; CHECK-NEXT: retq
+entry:
+ %v = call <16 x i32> @llvm.x86.avx512.gather.dpi.512(<16 x i32> zeroinitializer, i8* %b, <16 x i32> %iv, i16 -1, i32 1)
+ ret <16 x i32> %v
+}
+
+declare <8 x i64> @llvm.x86.avx512.gather.dpq.512(<8 x i64>, i8*, <8 x i32>, i8, i32)
+
+define <8 x i64> @test_llvm_x86_avx512_gather_dpq_512(i8* %b, <8 x i32> %iv) #1 {
+; CHECK-LABEL: test_llvm_x86_avx512_gather_dpq_512:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rsp, %rax
+; CHECK-NEXT: movq $-1, %rcx
+; CHECK-NEXT: sarq $63, %rax
+; CHECK-NEXT: kxnorw %k0, %k0, %k1
+; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; CHECK-NEXT: orq %rax, %rdi
+; CHECK-NEXT: vmovq %rax, %xmm2
+; CHECK-NEXT: vpbroadcastq %xmm2, %ymm2
+; CHECK-NEXT: vpor %ymm0, %ymm2, %ymm0
+; CHECK-NEXT: vpgatherdq (%rdi,%ymm0), %zmm1 {%k1}
+; CHECK-NEXT: shlq $47, %rax
+; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: orq %rax, %rsp
+; CHECK-NEXT: retq
+entry:
+ %v = call <8 x i64> @llvm.x86.avx512.gather.dpq.512(<8 x i64> zeroinitializer, i8* %b, <8 x i32> %iv, i8 -1, i32 1)
+ ret <8 x i64> %v
+}
+
+
+declare <8 x i32> @llvm.x86.avx512.gather.qpi.512(<8 x i32>, i8*, <8 x i64>, i8, i32)
+
+define <8 x i32> @test_llvm_x86_avx512_gather_qpi_512(i8* %b, <8 x i64> %iv) #1 {
+; CHECK-LABEL: test_llvm_x86_avx512_gather_qpi_512:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rsp, %rax
+; CHECK-NEXT: movq $-1, %rcx
+; CHECK-NEXT: sarq $63, %rax
+; CHECK-NEXT: kxnorw %k0, %k0, %k1
+; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; CHECK-NEXT: orq %rax, %rdi
+; CHECK-NEXT: vpbroadcastq %rax, %zmm2
+; CHECK-NEXT: vporq %zmm0, %zmm2, %zmm0
+; CHECK-NEXT: vpgatherqd (%rdi,%zmm0), %ymm1 {%k1}
+; CHECK-NEXT: shlq $47, %rax
+; CHECK-NEXT: vmovdqa %ymm1, %ymm0
+; CHECK-NEXT: orq %rax, %rsp
+; CHECK-NEXT: retq
+entry:
+ %v = call <8 x i32> @llvm.x86.avx512.gather.qpi.512(<8 x i32> zeroinitializer, i8* %b, <8 x i64> %iv, i8 -1, i32 1)
+ ret <8 x i32> %v
+}
+
+declare <8 x i64> @llvm.x86.avx512.gather.qpq.512(<8 x i64>, i8*, <8 x i64>, i8, i32)
+
+define <8 x i64> @test_llvm_x86_avx512_gather_qpq_512(i8* %b, <8 x i64> %iv) #1 {
+; CHECK-LABEL: test_llvm_x86_avx512_gather_qpq_512:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rsp, %rax
+; CHECK-NEXT: movq $-1, %rcx
+; CHECK-NEXT: sarq $63, %rax
+; CHECK-NEXT: kxnorw %k0, %k0, %k1
+; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; CHECK-NEXT: orq %rax, %rdi
+; CHECK-NEXT: vpbroadcastq %rax, %zmm2
+; CHECK-NEXT: vporq %zmm0, %zmm2, %zmm0
+; CHECK-NEXT: vpgatherqq (%rdi,%zmm0), %zmm1 {%k1}
+; CHECK-NEXT: shlq $47, %rax
+; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: orq %rax, %rsp
+; CHECK-NEXT: retq
+entry:
+ %v = call <8 x i64> @llvm.x86.avx512.gather.qpq.512(<8 x i64> zeroinitializer, i8* %b, <8 x i64> %iv, i8 -1, i32 1)
+ ret <8 x i64> %v
+}
+
+declare void @llvm.x86.avx512.gatherpf.qps.512(i8, <8 x i64>, i8*, i32, i32);
+
+define void @test_llvm_x86_avx512_gatherpf_qps_512(<8 x i64> %iv, i8* %b) #1 {
+; CHECK-LABEL: test_llvm_x86_avx512_gatherpf_qps_512:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rsp, %rax
+; CHECK-NEXT: movq $-1, %rcx
+; CHECK-NEXT: sarq $63, %rax
+; CHECK-NEXT: kxnorw %k0, %k0, %k1
+; CHECK-NEXT: orq %rax, %rdi
+; CHECK-NEXT: vpbroadcastq %rax, %zmm1
+; CHECK-NEXT: vporq %zmm0, %zmm1, %zmm0
+; CHECK-NEXT: vgatherpf0qps (%rdi,%zmm0,4) {%k1}
+; CHECK-NEXT: shlq $47, %rax
+; CHECK-NEXT: orq %rax, %rsp
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ call void @llvm.x86.avx512.gatherpf.qps.512(i8 -1, <8 x i64> %iv, i8* %b, i32 4, i32 3)
+ ret void
+}
+
+declare <4 x float> @llvm.x86.avx512.gather3siv4.sf(<4 x float>, i8*, <4 x i32>, i8, i32)
+
+define <4 x float> @test_llvm_x86_avx512_gather3siv4_sf(i8* %b, <4 x i32> %iv) #2 {
+; CHECK-LABEL: test_llvm_x86_avx512_gather3siv4_sf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rsp, %rax
+; CHECK-NEXT: movq $-1, %rcx
+; CHECK-NEXT: sarq $63, %rax
+; CHECK-NEXT: kxnorw %k0, %k0, %k1
+; CHECK-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; CHECK-NEXT: orq %rax, %rdi
+; CHECK-NEXT: vpbroadcastq %rax, %xmm2
+; CHECK-NEXT: vpor %xmm0, %xmm2, %xmm0
+; CHECK-NEXT: vgatherdps (%rdi,%xmm0), %xmm1 {%k1}
+; CHECK-NEXT: shlq $47, %rax
+; CHECK-NEXT: vmovaps %xmm1, %xmm0
+; CHECK-NEXT: orq %rax, %rsp
+; CHECK-NEXT: retq
+entry:
+ %v = call <4 x float> @llvm.x86.avx512.gather3siv4.sf(<4 x float> zeroinitializer, i8* %b, <4 x i32> %iv, i8 -1, i32 1)
+ ret <4 x float> %v
+}
+
+declare <4 x float> @llvm.x86.avx512.gather3div4.sf(<4 x float>, i8*, <2 x i64>, i8, i32)
+
+define <4 x float> @test_llvm_x86_avx512_gather3div4_sf(i8* %b, <2 x i64> %iv) #2 {
+; CHECK-LABEL: test_llvm_x86_avx512_gather3div4_sf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rsp, %rax
+; CHECK-NEXT: movq $-1, %rcx
+; CHECK-NEXT: sarq $63, %rax
+; CHECK-NEXT: kxnorw %k0, %k0, %k1
+; CHECK-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; CHECK-NEXT: orq %rax, %rdi
+; CHECK-NEXT: vpbroadcastq %rax, %xmm2
+; CHECK-NEXT: vpor %xmm0, %xmm2, %xmm0
+; CHECK-NEXT: vgatherqps (%rdi,%xmm0), %xmm1 {%k1}
+; CHECK-NEXT: shlq $47, %rax
+; CHECK-NEXT: vmovaps %xmm1, %xmm0
+; CHECK-NEXT: orq %rax, %rsp
+; CHECK-NEXT: retq
+entry:
+ %v = call <4 x float> @llvm.x86.avx512.gather3div4.sf(<4 x float> zeroinitializer, i8* %b, <2 x i64> %iv, i8 -1, i32 1)
+ ret <4 x float> %v
+}
+
+declare <2 x double> @llvm.x86.avx512.gather3siv2.df(<2 x double>, i8*, <4 x i32>, i8, i32)
+
+define <2 x double> @test_llvm_x86_avx512_gather3siv2_df(i8* %b, <4 x i32> %iv) #2 {
+; CHECK-LABEL: test_llvm_x86_avx512_gather3siv2_df:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rsp, %rax
+; CHECK-NEXT: movq $-1, %rcx
+; CHECK-NEXT: sarq $63, %rax
+; CHECK-NEXT: kxnorw %k0, %k0, %k1
+; CHECK-NEXT: vxorpd %xmm1, %xmm1, %xmm1
+; CHECK-NEXT: orq %rax, %rdi
+; CHECK-NEXT: vpbroadcastq %rax, %xmm2
+; CHECK-NEXT: vpor %xmm0, %xmm2, %xmm0
+; CHECK-NEXT: vgatherdpd (%rdi,%xmm0), %xmm1 {%k1}
+; CHECK-NEXT: shlq $47, %rax
+; CHECK-NEXT: vmovapd %xmm1, %xmm0
+; CHECK-NEXT: orq %rax, %rsp
+; CHECK-NEXT: retq
+entry:
+ %v = call <2 x double> @llvm.x86.avx512.gather3siv2.df(<2 x double> zeroinitializer, i8* %b, <4 x i32> %iv, i8 -1, i32 1)
+ ret <2 x double> %v
+}
+
+declare <2 x double> @llvm.x86.avx512.gather3div2.df(<2 x double>, i8*, <2 x i64>, i8, i32)
+
+define <2 x double> @test_llvm_x86_avx512_gather3div2_df(i8* %b, <2 x i64> %iv) #2 {
+; CHECK-LABEL: test_llvm_x86_avx512_gather3div2_df:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rsp, %rax
+; CHECK-NEXT: movq $-1, %rcx
+; CHECK-NEXT: sarq $63, %rax
+; CHECK-NEXT: kxnorw %k0, %k0, %k1
+; CHECK-NEXT: vxorpd %xmm1, %xmm1, %xmm1
+; CHECK-NEXT: orq %rax, %rdi
+; CHECK-NEXT: vpbroadcastq %rax, %xmm2
+; CHECK-NEXT: vpor %xmm0, %xmm2, %xmm0
+; CHECK-NEXT: vgatherqpd (%rdi,%xmm0), %xmm1 {%k1}
+; CHECK-NEXT: shlq $47, %rax
+; CHECK-NEXT: vmovapd %xmm1, %xmm0
+; CHECK-NEXT: orq %rax, %rsp
+; CHECK-NEXT: retq
+entry:
+ %v = call <2 x double> @llvm.x86.avx512.gather3div2.df(<2 x double> zeroinitializer, i8* %b, <2 x i64> %iv, i8 -1, i32 1)
+ ret <2 x double> %v
+}
+
+declare <8 x float> @llvm.x86.avx512.gather3siv8.sf(<8 x float>, i8*, <8 x i32>, i8, i32)
+
+define <8 x float> @test_llvm_x86_avx512_gather3siv8_sf(i8* %b, <8 x i32> %iv) #2 {
+; CHECK-LABEL: test_llvm_x86_avx512_gather3siv8_sf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rsp, %rax
+; CHECK-NEXT: movq $-1, %rcx
+; CHECK-NEXT: sarq $63, %rax
+; CHECK-NEXT: kxnorw %k0, %k0, %k1
+; CHECK-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; CHECK-NEXT: orq %rax, %rdi
+; CHECK-NEXT: vpbroadcastq %rax, %ymm2
+; CHECK-NEXT: vpor %ymm0, %ymm2, %ymm0
+; CHECK-NEXT: vgatherdps (%rdi,%ymm0), %ymm1 {%k1}
+; CHECK-NEXT: shlq $47, %rax
+; CHECK-NEXT: vmovaps %ymm1, %ymm0
+; CHECK-NEXT: orq %rax, %rsp
+; CHECK-NEXT: retq
+entry:
+ %v = call <8 x float> @llvm.x86.avx512.gather3siv8.sf(<8 x float> zeroinitializer, i8* %b, <8 x i32> %iv, i8 -1, i32 1)
+ ret <8 x float> %v
+}
+
+declare <4 x float> @llvm.x86.avx512.gather3div8.sf(<4 x float>, i8*, <4 x i64>, i8, i32)
+
+define <4 x float> @test_llvm_x86_avx512_gather3div8_sf(i8* %b, <4 x i64> %iv) #2 {
+; CHECK-LABEL: test_llvm_x86_avx512_gather3div8_sf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rsp, %rax
+; CHECK-NEXT: movq $-1, %rcx
+; CHECK-NEXT: sarq $63, %rax
+; CHECK-NEXT: kxnorw %k0, %k0, %k1
+; CHECK-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; CHECK-NEXT: orq %rax, %rdi
+; CHECK-NEXT: vpbroadcastq %rax, %ymm2
+; CHECK-NEXT: vpor %ymm0, %ymm2, %ymm0
+; CHECK-NEXT: vgatherqps (%rdi,%ymm0), %xmm1 {%k1}
+; CHECK-NEXT: shlq $47, %rax
+; CHECK-NEXT: vmovaps %xmm1, %xmm0
+; CHECK-NEXT: orq %rax, %rsp
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %v = call <4 x float> @llvm.x86.avx512.gather3div8.sf(<4 x float> zeroinitializer, i8* %b, <4 x i64> %iv, i8 -1, i32 1)
+ ret <4 x float> %v
+}
+
+declare <4 x double> @llvm.x86.avx512.gather3siv4.df(<4 x double>, i8*, <4 x i32>, i8, i32)
+
+define <4 x double> @test_llvm_x86_avx512_gather3siv4_df(i8* %b, <4 x i32> %iv) #2 {
+; CHECK-LABEL: test_llvm_x86_avx512_gather3siv4_df:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rsp, %rax
+; CHECK-NEXT: movq $-1, %rcx
+; CHECK-NEXT: sarq $63, %rax
+; CHECK-NEXT: kxnorw %k0, %k0, %k1
+; CHECK-NEXT: vxorpd %xmm1, %xmm1, %xmm1
+; CHECK-NEXT: orq %rax, %rdi
+; CHECK-NEXT: vpbroadcastq %rax, %xmm2
+; CHECK-NEXT: vpor %xmm0, %xmm2, %xmm0
+; CHECK-NEXT: vgatherdpd (%rdi,%xmm0), %ymm1 {%k1}
+; CHECK-NEXT: shlq $47, %rax
+; CHECK-NEXT: vmovapd %ymm1, %ymm0
+; CHECK-NEXT: orq %rax, %rsp
+; CHECK-NEXT: retq
+entry:
+ %v = call <4 x double> @llvm.x86.avx512.gather3siv4.df(<4 x double> zeroinitializer, i8* %b, <4 x i32> %iv, i8 -1, i32 1)
+ ret <4 x double> %v
+}
+
+declare <4 x double> @llvm.x86.avx512.gather3div4.df(<4 x double>, i8*, <4 x i64>, i8, i32)
+
+define <4 x double> @test_llvm_x86_avx512_gather3div4_df(i8* %b, <4 x i64> %iv) #2 {
+; CHECK-LABEL: test_llvm_x86_avx512_gather3div4_df:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rsp, %rax
+; CHECK-NEXT: movq $-1, %rcx
+; CHECK-NEXT: sarq $63, %rax
+; CHECK-NEXT: kxnorw %k0, %k0, %k1
+; CHECK-NEXT: vxorpd %xmm1, %xmm1, %xmm1
+; CHECK-NEXT: orq %rax, %rdi
+; CHECK-NEXT: vpbroadcastq %rax, %ymm2
+; CHECK-NEXT: vpor %ymm0, %ymm2, %ymm0
+; CHECK-NEXT: vgatherqpd (%rdi,%ymm0), %ymm1 {%k1}
+; CHECK-NEXT: shlq $47, %rax
+; CHECK-NEXT: vmovapd %ymm1, %ymm0
+; CHECK-NEXT: orq %rax, %rsp
+; CHECK-NEXT: retq
+entry:
+ %v = call <4 x double> @llvm.x86.avx512.gather3div4.df(<4 x double> zeroinitializer, i8* %b, <4 x i64> %iv, i8 -1, i32 1)
+ ret <4 x double> %v
+}
+
+declare <4 x i32> @llvm.x86.avx512.gather3siv4.si(<4 x i32>, i8*, <4 x i32>, i8, i32)
+
+define <4 x i32> @test_llvm_x86_avx512_gather3siv4_si(i8* %b, <4 x i32> %iv) #2 {
+; CHECK-LABEL: test_llvm_x86_avx512_gather3siv4_si:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rsp, %rax
+; CHECK-NEXT: movq $-1, %rcx
+; CHECK-NEXT: sarq $63, %rax
+; CHECK-NEXT: kxnorw %k0, %k0, %k1
+; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; CHECK-NEXT: orq %rax, %rdi
+; CHECK-NEXT: vpbroadcastq %rax, %xmm2
+; CHECK-NEXT: vpor %xmm0, %xmm2, %xmm0
+; CHECK-NEXT: vpgatherdd (%rdi,%xmm0), %xmm1 {%k1}
+; CHECK-NEXT: shlq $47, %rax
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: orq %rax, %rsp
+; CHECK-NEXT: retq
+entry:
+ %v = call <4 x i32> @llvm.x86.avx512.gather3siv4.si(<4 x i32> zeroinitializer, i8* %b, <4 x i32> %iv, i8 -1, i32 1)
+ ret <4 x i32> %v
+}
+
+declare <4 x i32> @llvm.x86.avx512.gather3div4.si(<4 x i32>, i8*, <2 x i64>, i8, i32)
+
+define <4 x i32> @test_llvm_x86_avx512_gather3div4_si(i8* %b, <2 x i64> %iv) #2 {
+; CHECK-LABEL: test_llvm_x86_avx512_gather3div4_si:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rsp, %rax
+; CHECK-NEXT: movq $-1, %rcx
+; CHECK-NEXT: sarq $63, %rax
+; CHECK-NEXT: kxnorw %k0, %k0, %k1
+; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; CHECK-NEXT: orq %rax, %rdi
+; CHECK-NEXT: vpbroadcastq %rax, %xmm2
+; CHECK-NEXT: vpor %xmm0, %xmm2, %xmm0
+; CHECK-NEXT: vpgatherqd (%rdi,%xmm0), %xmm1 {%k1}
+; CHECK-NEXT: shlq $47, %rax
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: orq %rax, %rsp
+; CHECK-NEXT: retq
+entry:
+ %v = call <4 x i32> @llvm.x86.avx512.gather3div4.si(<4 x i32> zeroinitializer, i8* %b, <2 x i64> %iv, i8 -1, i32 1)
+ ret <4 x i32> %v
+}
+
+declare <2 x i64> @llvm.x86.avx512.gather3siv2.di(<2 x i64>, i8*, <4 x i32>, i8, i32)
+
+define <2 x i64> @test_llvm_x86_avx512_gather3siv2_di(i8* %b, <4 x i32> %iv) #2 {
+; CHECK-LABEL: test_llvm_x86_avx512_gather3siv2_di:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rsp, %rax
+; CHECK-NEXT: movq $-1, %rcx
+; CHECK-NEXT: sarq $63, %rax
+; CHECK-NEXT: kxnorw %k0, %k0, %k1
+; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; CHECK-NEXT: orq %rax, %rdi
+; CHECK-NEXT: vpbroadcastq %rax, %xmm2
+; CHECK-NEXT: vpor %xmm0, %xmm2, %xmm0
+; CHECK-NEXT: vpgatherdq (%rdi,%xmm0), %xmm1 {%k1}
+; CHECK-NEXT: shlq $47, %rax
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: orq %rax, %rsp
+; CHECK-NEXT: retq
+entry:
+ %v = call <2 x i64> @llvm.x86.avx512.gather3siv2.di(<2 x i64> zeroinitializer, i8* %b, <4 x i32> %iv, i8 -1, i32 1)
+ ret <2 x i64> %v
+}
+
+declare <2 x i64> @llvm.x86.avx512.gather3div2.di(<2 x i64>, i8*, <2 x i64>, i8, i32)
+
+define <2 x i64> @test_llvm_x86_avx512_gather3div2_di(i8* %b, <2 x i64> %iv) #2 {
+; CHECK-LABEL: test_llvm_x86_avx512_gather3div2_di:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rsp, %rax
+; CHECK-NEXT: movq $-1, %rcx
+; CHECK-NEXT: sarq $63, %rax
+; CHECK-NEXT: kxnorw %k0, %k0, %k1
+; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; CHECK-NEXT: orq %rax, %rdi
+; CHECK-NEXT: vpbroadcastq %rax, %xmm2
+; CHECK-NEXT: vpor %xmm0, %xmm2, %xmm0
+; CHECK-NEXT: vpgatherqq (%rdi,%xmm0), %xmm1 {%k1}
+; CHECK-NEXT: shlq $47, %rax
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: orq %rax, %rsp
+; CHECK-NEXT: retq
+entry:
+ %v = call <2 x i64> @llvm.x86.avx512.gather3div2.di(<2 x i64> zeroinitializer, i8* %b, <2 x i64> %iv, i8 -1, i32 1)
+ ret <2 x i64> %v
+}
+
+declare <8 x i32> @llvm.x86.avx512.gather3siv8.si(<8 x i32>, i8*, <8 x i32>, i8, i32)
+
+define <8 x i32> @test_llvm_x86_avx512_gather3siv8_si(i8* %b, <8 x i32> %iv) #2 {
+; CHECK-LABEL: test_llvm_x86_avx512_gather3siv8_si:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rsp, %rax
+; CHECK-NEXT: movq $-1, %rcx
+; CHECK-NEXT: sarq $63, %rax
+; CHECK-NEXT: kxnorw %k0, %k0, %k1
+; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; CHECK-NEXT: orq %rax, %rdi
+; CHECK-NEXT: vpbroadcastq %rax, %ymm2
+; CHECK-NEXT: vpor %ymm0, %ymm2, %ymm0
+; CHECK-NEXT: vpgatherdd (%rdi,%ymm0), %ymm1 {%k1}
+; CHECK-NEXT: shlq $47, %rax
+; CHECK-NEXT: vmovdqa %ymm1, %ymm0
+; CHECK-NEXT: orq %rax, %rsp
+; CHECK-NEXT: retq
+entry:
+ %v = call <8 x i32> @llvm.x86.avx512.gather3siv8.si(<8 x i32> zeroinitializer, i8* %b, <8 x i32> %iv, i8 -1, i32 1)
+ ret <8 x i32> %v
+}
+
+declare <4 x i32> @llvm.x86.avx512.gather3div8.si(<4 x i32>, i8*, <4 x i64>, i8, i32)
+
+define <4 x i32> @test_llvm_x86_avx512_gather3div8_si(i8* %b, <4 x i64> %iv) #2 {
+; CHECK-LABEL: test_llvm_x86_avx512_gather3div8_si:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rsp, %rax
+; CHECK-NEXT: movq $-1, %rcx
+; CHECK-NEXT: sarq $63, %rax
+; CHECK-NEXT: kxnorw %k0, %k0, %k1
+; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; CHECK-NEXT: orq %rax, %rdi
+; CHECK-NEXT: vpbroadcastq %rax, %ymm2
+; CHECK-NEXT: vpor %ymm0, %ymm2, %ymm0
+; CHECK-NEXT: vpgatherqd (%rdi,%ymm0), %xmm1 {%k1}
+; CHECK-NEXT: shlq $47, %rax
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: orq %rax, %rsp
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %v = call <4 x i32> @llvm.x86.avx512.gather3div8.si(<4 x i32> zeroinitializer, i8* %b, <4 x i64> %iv, i8 -1, i32 1)
+ ret <4 x i32> %v
+}
+
+declare <4 x i64> @llvm.x86.avx512.gather3siv4.di(<4 x i64>, i8*, <4 x i32>, i8, i32)
+
+define <4 x i64> @test_llvm_x86_avx512_gather3siv4_di(i8* %b, <4 x i32> %iv) #2 {
+; CHECK-LABEL: test_llvm_x86_avx512_gather3siv4_di:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rsp, %rax
+; CHECK-NEXT: movq $-1, %rcx
+; CHECK-NEXT: sarq $63, %rax
+; CHECK-NEXT: kxnorw %k0, %k0, %k1
+; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; CHECK-NEXT: orq %rax, %rdi
+; CHECK-NEXT: vpbroadcastq %rax, %xmm2
+; CHECK-NEXT: vpor %xmm0, %xmm2, %xmm0
+; CHECK-NEXT: vpgatherdq (%rdi,%xmm0), %ymm1 {%k1}
+; CHECK-NEXT: shlq $47, %rax
+; CHECK-NEXT: vmovdqa %ymm1, %ymm0
+; CHECK-NEXT: orq %rax, %rsp
+; CHECK-NEXT: retq
+entry:
+ %v = call <4 x i64> @llvm.x86.avx512.gather3siv4.di(<4 x i64> zeroinitializer, i8* %b, <4 x i32> %iv, i8 -1, i32 1)
+ ret <4 x i64> %v
+}
+
+declare <4 x i64> @llvm.x86.avx512.gather3div4.di(<4 x i64>, i8*, <4 x i64>, i8, i32)
+
+define <4 x i64> @test_llvm_x86_avx512_gather3div4_di(i8* %b, <4 x i64> %iv) #2 {
+; CHECK-LABEL: test_llvm_x86_avx512_gather3div4_di:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rsp, %rax
+; CHECK-NEXT: movq $-1, %rcx
+; CHECK-NEXT: sarq $63, %rax
+; CHECK-NEXT: kxnorw %k0, %k0, %k1
+; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; CHECK-NEXT: orq %rax, %rdi
+; CHECK-NEXT: vpbroadcastq %rax, %ymm2
+; CHECK-NEXT: vpor %ymm0, %ymm2, %ymm0
+; CHECK-NEXT: vpgatherqq (%rdi,%ymm0), %ymm1 {%k1}
+; CHECK-NEXT: shlq $47, %rax
+; CHECK-NEXT: vmovdqa %ymm1, %ymm0
+; CHECK-NEXT: orq %rax, %rsp
+; CHECK-NEXT: retq
+entry:
+ %v = call <4 x i64> @llvm.x86.avx512.gather3div4.di(<4 x i64> zeroinitializer, i8* %b, <4 x i64> %iv, i8 -1, i32 1)
+ ret <4 x i64> %v
+}
+
+attributes #0 = { nounwind "target-features"="+avx2" }
+attributes #1 = { nounwind "target-features"="+avx512f" }
+attributes #2 = { nounwind "target-features"="+avx512vl" }