[llvm] r352922 - [X86][AVX] Add VMOVDDUP-VPBROADCASTQ execution domain mapping
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Fri Feb 1 13:41:30 PST 2019
Author: rksimon
Date: Fri Feb 1 13:41:30 2019
New Revision: 352922
URL: http://llvm.org/viewvc/llvm-project?rev=352922&view=rev
Log:
[X86][AVX] Add VMOVDDUP-VPBROADCASTQ execution domain mapping
Noticed in D57514. VMOVDDUP xmm and VPBROADCASTQ xmm both splat the low 64 bits of their source, so the execution domain fixing pass can switch between the double-domain and integer-domain forms to avoid domain-crossing penalties.
Differential Revision: https://reviews.llvm.org/D57519
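
For context: each row of the ReplaceableInstrs tables below lists the equivalent opcode for the packed-single, packed-double and packed-integer execution domains. VMOVDDUP has no single-precision counterpart, so it occupies both floating-point columns. A minimal sketch of the row lookup, with simplified signatures rather than the exact in-tree helper (which takes an llvm::ArrayRef of rows and X86 opcode enum values):

#include <cstddef>
#include <cstdint>

// Sketch only. Domains are numbered 1 = PackedSingle, 2 = PackedDouble,
// 3 = PackedInt, so column I of a row holds the opcode for domain I+1.
// Returns the matching row, or null if the opcode has no equivalents in
// its current domain.
static const uint16_t *lookupRow(unsigned Opcode, unsigned Domain,
                                 const uint16_t (*Table)[3], size_t NumRows) {
  for (size_t I = 0; I != NumRows; ++I)
    if (Table[I][Domain - 1] == Opcode)
      return Table[I];
  return nullptr;
}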
Modified:
llvm/trunk/lib/Target/X86/X86InstrInfo.cpp
llvm/trunk/test/CodeGen/X86/avx2-intrinsics-fast-isel.ll
llvm/trunk/test/CodeGen/X86/avx2-intrinsics-x86-upgrade.ll
llvm/trunk/test/CodeGen/X86/avx2-vbroadcast.ll
llvm/trunk/test/CodeGen/X86/avx512-shuffles/broadcast-scalar-int.ll
llvm/trunk/test/CodeGen/X86/avx512-shuffles/broadcast-vector-int.ll
llvm/trunk/test/CodeGen/X86/avx512-shuffles/partial_permute.ll
llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll
llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll
llvm/trunk/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll
llvm/trunk/test/CodeGen/X86/insert-loaded-scalar.ll
llvm/trunk/test/CodeGen/X86/insertelement-var-index.ll
llvm/trunk/test/CodeGen/X86/oddshuffles.ll
llvm/trunk/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll
llvm/trunk/test/CodeGen/X86/subvector-broadcast.ll
llvm/trunk/test/CodeGen/X86/vector-shift-lshr-sub128.ll
llvm/trunk/test/CodeGen/X86/vector-shuffle-128-v2.ll
llvm/trunk/test/CodeGen/X86/vector-shuffle-128-v4.ll
llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll
llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-avx2.ll
llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-xop.ll
llvm/trunk/test/CodeGen/X86/vector-shuffle-combining.ll
llvm/trunk/test/CodeGen/X86/vector-trunc-math-widen.ll
llvm/trunk/test/CodeGen/X86/vector-trunc-math.ll
llvm/trunk/test/CodeGen/X86/vector-trunc-widen.ll
llvm/trunk/test/CodeGen/X86/vector-trunc.ll
llvm/trunk/test/CodeGen/X86/widened-broadcast.ll
Modified: llvm/trunk/lib/Target/X86/X86InstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrInfo.cpp?rev=352922&r1=352921&r2=352922&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrInfo.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86InstrInfo.cpp Fri Feb 1 13:41:30 2019
@@ -6040,6 +6040,8 @@ static const uint16_t ReplaceableInstrs[
{ X86::VBROADCASTSSZ256m, X86::VBROADCASTSSZ256m, X86::VPBROADCASTDZ256m },
{ X86::VBROADCASTSSZr, X86::VBROADCASTSSZr, X86::VPBROADCASTDZr },
{ X86::VBROADCASTSSZm, X86::VBROADCASTSSZm, X86::VPBROADCASTDZm },
+ { X86::VMOVDDUPZ128rr, X86::VMOVDDUPZ128rr, X86::VPBROADCASTQZ128r },
+ { X86::VMOVDDUPZ128rm, X86::VMOVDDUPZ128rm, X86::VPBROADCASTQZ128m },
{ X86::VBROADCASTSDZ256r, X86::VBROADCASTSDZ256r, X86::VPBROADCASTQZ256r },
{ X86::VBROADCASTSDZ256m, X86::VBROADCASTSDZ256m, X86::VPBROADCASTQZ256m },
{ X86::VBROADCASTSDZr, X86::VBROADCASTSDZr, X86::VPBROADCASTQZr },
@@ -6130,6 +6132,8 @@ static const uint16_t ReplaceableInstrsA
{ X86::VPERM2F128rr, X86::VPERM2F128rr, X86::VPERM2I128rr },
{ X86::VBROADCASTSSrm, X86::VBROADCASTSSrm, X86::VPBROADCASTDrm},
{ X86::VBROADCASTSSrr, X86::VBROADCASTSSrr, X86::VPBROADCASTDrr},
+ { X86::VMOVDDUPrm, X86::VMOVDDUPrm, X86::VPBROADCASTQrm},
+ { X86::VMOVDDUPrr, X86::VMOVDDUPrr, X86::VPBROADCASTQrr},
{ X86::VBROADCASTSSYrr, X86::VBROADCASTSSYrr, X86::VPBROADCASTDYrr},
{ X86::VBROADCASTSSYrm, X86::VBROADCASTSSYrm, X86::VPBROADCASTDYrm},
{ X86::VBROADCASTSDYrr, X86::VBROADCASTSDYrr, X86::VPBROADCASTQYrr},
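
Once a row is found, changing domains amounts to swapping in the opcode from the target column. A hedged sketch of that step, reusing lookupRow from the sketch above; FakeMI is an illustrative stand-in, not the real MachineInstr API:

// Forward declaration of the helper sketched earlier.
const uint16_t *lookupRow(unsigned Opcode, unsigned Domain,
                          const uint16_t (*Table)[3], size_t NumRows);

// Illustrative instruction record; the real pass mutates a MachineInstr.
struct FakeMI {
  unsigned Opcode;
};

// If MI's opcode appears in the table for CurDomain, rewrite it to the
// NewDomain equivalent. With the rows added above, a 128-bit VPBROADCASTQ
// (integer domain) becomes a VMOVDDUP (double domain) and vice versa,
// which is exactly the churn visible in the test diffs below.
static bool switchDomain(FakeMI &MI, unsigned CurDomain, unsigned NewDomain,
                         const uint16_t (*Table)[3], size_t NumRows) {
  if (const uint16_t *Row = lookupRow(MI.Opcode, CurDomain, Table, NumRows)) {
    MI.Opcode = Row[NewDomain - 1];
    return true;
  }
  return false;
}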
Modified: llvm/trunk/test/CodeGen/X86/avx2-intrinsics-fast-isel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx2-intrinsics-fast-isel.ll?rev=352922&r1=352921&r2=352922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx2-intrinsics-fast-isel.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx2-intrinsics-fast-isel.ll Fri Feb 1 13:41:30 2019
@@ -319,7 +319,7 @@ define <4 x i64> @test_mm256_broadcastd_
define <2 x i64> @test_mm_broadcastq_epi64(<2 x i64> %a0) {
; CHECK-LABEL: test_mm_broadcastq_epi64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vpbroadcastq %xmm0, %xmm0
+; CHECK-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; CHECK-NEXT: ret{{[l|q]}}
%res = shufflevector <2 x i64> %a0, <2 x i64> undef, <2 x i32> zeroinitializer
ret <2 x i64> %res
Modified: llvm/trunk/test/CodeGen/X86/avx2-intrinsics-x86-upgrade.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx2-intrinsics-x86-upgrade.ll?rev=352922&r1=352921&r2=352922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx2-intrinsics-x86-upgrade.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx2-intrinsics-x86-upgrade.ll Fri Feb 1 13:41:30 2019
@@ -234,7 +234,7 @@ declare <8 x i32> @llvm.x86.avx2.pbroadc
define <2 x i64> @test_x86_avx2_pbroadcastq_128(<2 x i64> %a0) {
; CHECK-LABEL: test_x86_avx2_pbroadcastq_128:
; CHECK: ## %bb.0:
-; CHECK-NEXT: vpbroadcastq %xmm0, %xmm0
+; CHECK-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; CHECK-NEXT: ret{{[l|q]}}
%res = call <2 x i64> @llvm.x86.avx2.pbroadcastq.128(<2 x i64> %a0)
ret <2 x i64> %res
Modified: llvm/trunk/test/CodeGen/X86/avx2-vbroadcast.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx2-vbroadcast.ll?rev=352922&r1=352921&r2=352922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx2-vbroadcast.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx2-vbroadcast.ll Fri Feb 1 13:41:30 2019
@@ -189,12 +189,12 @@ define <2 x i64> @Q64(i64* %ptr) nounwin
; X32-LABEL: Q64:
; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: vpbroadcastq (%eax), %xmm0
+; X32-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; X32-NEXT: retl
;
; X64-LABEL: Q64:
; X64: ## %bb.0: ## %entry
-; X64-NEXT: vpbroadcastq (%rdi), %xmm0
+; X64-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; X64-NEXT: retq
entry:
%q = load i64, i64* %ptr, align 4
@@ -233,7 +233,7 @@ define <8 x i16> @broadcast_mem_v4i16_v8
;
; X64-LABEL: broadcast_mem_v4i16_v8i16:
; X64: ## %bb.0:
-; X64-NEXT: vpbroadcastq (%rdi), %xmm0
+; X64-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; X64-NEXT: retq
%load = load <4 x i16>, <4 x i16>* %ptr
%shuf = shufflevector <4 x i16> %load, <4 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
@@ -471,7 +471,7 @@ define <2 x i64> @load_splat_2i64_2i64_1
;
; X64-LABEL: load_splat_2i64_2i64_1111:
; X64: ## %bb.0: ## %entry
-; X64-NEXT: vpbroadcastq 8(%rdi), %xmm0
+; X64-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; X64-NEXT: retq
entry:
%ld = load <2 x i64>, <2 x i64>* %ptr
@@ -865,12 +865,12 @@ define <4 x i64> @_inreg4xi64(<4 x i64
define <2 x i64> @_inreg2xi64(<2 x i64> %a) {
; X32-LABEL: _inreg2xi64:
; X32: ## %bb.0:
-; X32-NEXT: vpbroadcastq %xmm0, %xmm0
+; X32-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; X32-NEXT: retl
;
; X64-LABEL: _inreg2xi64:
; X64: ## %bb.0:
-; X64-NEXT: vpbroadcastq %xmm0, %xmm0
+; X64-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; X64-NEXT: retq
%b = shufflevector <2 x i64> %a, <2 x i64> undef, <2 x i32> zeroinitializer
ret <2 x i64> %b
@@ -1327,9 +1327,9 @@ define void @isel_crash_2q(i64* %cV_R.ad
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X32-NEXT: vmovaps %xmm0, (%esp)
-; X32-NEXT: vpbroadcastq (%eax), %xmm1
+; X32-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
; X32-NEXT: vmovaps %xmm0, {{[0-9]+}}(%esp)
-; X32-NEXT: vmovdqa %xmm1, {{[0-9]+}}(%esp)
+; X32-NEXT: vmovaps %xmm1, {{[0-9]+}}(%esp)
; X32-NEXT: addl $60, %esp
; X32-NEXT: retl
;
@@ -1337,9 +1337,9 @@ define void @isel_crash_2q(i64* %cV_R.ad
; X64: ## %bb.0: ## %entry
; X64-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X64-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: vpbroadcastq (%rdi), %xmm1
+; X64-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
; X64-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: vmovdqa %xmm1, -{{[0-9]+}}(%rsp)
+; X64-NEXT: vmovaps %xmm1, -{{[0-9]+}}(%rsp)
; X64-NEXT: retq
entry:
%__a.addr.i = alloca <2 x i64>, align 16
Modified: llvm/trunk/test/CodeGen/X86/avx512-shuffles/broadcast-scalar-int.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-shuffles/broadcast-scalar-int.ll?rev=352922&r1=352921&r2=352922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-shuffles/broadcast-scalar-int.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-shuffles/broadcast-scalar-int.ll Fri Feb 1 13:41:30 2019
@@ -2324,7 +2324,7 @@ define <16 x i32> @test_masked_z_i32_to_
define <2 x i64> @test_i64_to_2_mem(i64* %p) {
; CHECK-LABEL: test_i64_to_2_mem:
; CHECK: # %bb.0:
-; CHECK-NEXT: vpbroadcastq (%rdi), %xmm0
+; CHECK-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; CHECK-NEXT: retq
%s = load i64, i64* %p
%vec = insertelement <2 x i64> undef, i64 %s, i32 0
Modified: llvm/trunk/test/CodeGen/X86/avx512-shuffles/broadcast-vector-int.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-shuffles/broadcast-vector-int.ll?rev=352922&r1=352921&r2=352922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-shuffles/broadcast-vector-int.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-shuffles/broadcast-vector-int.ll Fri Feb 1 13:41:30 2019
@@ -6,7 +6,7 @@
define <4 x i32> @test_2xi32_to_4xi32(<4 x i32> %vec) {
; CHECK-LABEL: test_2xi32_to_4xi32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vpbroadcastq %xmm0, %xmm0
+; CHECK-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; CHECK-NEXT: retq
%res = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
ret <4 x i32> %res
@@ -318,7 +318,7 @@ define <16 x i32> @test_masked_z_2xi32_t
define <4 x i32> @test_2xi32_to_4xi32_mem(<2 x i32>* %vp) {
; CHECK-LABEL: test_2xi32_to_4xi32_mem:
; CHECK: # %bb.0:
-; CHECK-NEXT: vpbroadcastq (%rdi), %xmm0
+; CHECK-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; CHECK-NEXT: retq
%vec = load <2 x i32>, <2 x i32>* %vp
%res = shufflevector <2 x i32> %vec, <2 x i32> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
Modified: llvm/trunk/test/CodeGen/X86/avx512-shuffles/partial_permute.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-shuffles/partial_permute.ll?rev=352922&r1=352921&r2=352922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-shuffles/partial_permute.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-shuffles/partial_permute.ll Fri Feb 1 13:41:30 2019
@@ -1160,9 +1160,9 @@ define <4 x i32> @test_masked_z_8xi32_to
define <4 x i32> @test_8xi32_to_4xi32_perm_mem_mask3(<8 x i32>* %vp) {
; CHECK-LABEL: test_8xi32_to_4xi32_perm_mem_mask3:
; CHECK: # %bb.0:
-; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = mem[1,1,2,3]
-; CHECK-NEXT: vpbroadcastq 8(%rdi), %xmm1
-; CHECK-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3]
+; CHECK-NEXT: vpermilps {{.*#+}} xmm0 = mem[1,1,2,3]
+; CHECK-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
+; CHECK-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3]
; CHECK-NEXT: retq
%vec = load <8 x i32>, <8 x i32>* %vp
%res = shufflevector <8 x i32> %vec, <8 x i32> undef, <4 x i32> <i32 5, i32 3, i32 2, i32 7>
Modified: llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll?rev=352922&r1=352921&r2=352922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll Fri Feb 1 13:41:30 2019
@@ -2104,7 +2104,7 @@ define <4 x i64> @test_mm256_maskz_broad
define <2 x i64> @test_mm_broadcastq_epi64(<2 x i64> %a0) {
; CHECK-LABEL: test_mm_broadcastq_epi64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vpbroadcastq %xmm0, %xmm0
+; CHECK-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; CHECK-NEXT: ret{{[l|q]}}
%res = shufflevector <2 x i64> %a0, <2 x i64> undef, <2 x i32> zeroinitializer
ret <2 x i64> %res
Modified: llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll?rev=352922&r1=352921&r2=352922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll Fri Feb 1 13:41:30 2019
@@ -3910,8 +3910,9 @@ define <2 x i64> @test_mask_andnot_epi64
; X86-LABEL: test_mask_andnot_epi64_rmb_128:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; X86-NEXT: vpbroadcastq (%eax), %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x59,0x08]
-; X86-NEXT: vpandn %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xdf,0xc1]
+; X86-NEXT: vmovddup (%eax), %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x12,0x08]
+; X86-NEXT: # xmm1 = mem[0,0]
+; X86-NEXT: vandnps %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x55,0xc1]
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_mask_andnot_epi64_rmb_128:
Modified: llvm/trunk/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll?rev=352922&r1=352921&r2=352922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll (original)
+++ llvm/trunk/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll Fri Feb 1 13:41:30 2019
@@ -95,8 +95,7 @@ define <16 x i8> @f16xi8_i64(<16 x i8> %
;
; ALL32-LABEL: f16xi8_i64:
; ALL32: # %bb.0:
-; ALL32-NEXT: vmovddup {{.*#+}} xmm1 = [7.9499288951273625E-275,7.9499288951273625E-275]
-; ALL32-NEXT: # xmm1 = mem[0,0]
+; ALL32-NEXT: vpbroadcastq {{.*#+}} xmm1 = [7.9499288951273625E-275,7.9499288951273625E-275]
; ALL32-NEXT: vpaddb %xmm1, %xmm0, %xmm0
; ALL32-NEXT: vpand %xmm1, %xmm0, %xmm0
; ALL32-NEXT: retl
@@ -692,8 +691,7 @@ define <8 x i16> @f8xi16_i64(<8 x i16> %
;
; ALL32-LABEL: f8xi16_i64:
; ALL32: # %bb.0:
-; ALL32-NEXT: vmovddup {{.*#+}} xmm1 = [4.1720559249406128E-309,4.1720559249406128E-309]
-; ALL32-NEXT: # xmm1 = mem[0,0]
+; ALL32-NEXT: vpbroadcastq {{.*#+}} xmm1 = [4.1720559249406128E-309,4.1720559249406128E-309]
; ALL32-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; ALL32-NEXT: vpand %xmm1, %xmm0, %xmm0
; ALL32-NEXT: retl
@@ -1147,8 +1145,7 @@ define <4 x i32> @f4xi32_i64(<4 x i32> %
;
; ALL32-LABEL: f4xi32_i64:
; ALL32: # %bb.0:
-; ALL32-NEXT: vmovddup {{.*#+}} xmm1 = [2.1219957909652723E-314,2.1219957909652723E-314]
-; ALL32-NEXT: # xmm1 = mem[0,0]
+; ALL32-NEXT: vpbroadcastq {{.*#+}} xmm1 = [2.1219957909652723E-314,2.1219957909652723E-314]
; ALL32-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; ALL32-NEXT: vpand %xmm1, %xmm0, %xmm0
; ALL32-NEXT: retl
@@ -1624,7 +1621,8 @@ define <4 x float> @f4xf32_f64(<4 x floa
;
; ALL64-LABEL: f4xf32_f64:
; ALL64: # %bb.0:
-; ALL64-NEXT: vpbroadcastq {{.*#+}} xmm1 = [4575657222482165760,4575657222482165760]
+; ALL64-NEXT: vmovddup {{.*#+}} xmm1 = [4575657222482165760,4575657222482165760]
+; ALL64-NEXT: # xmm1 = mem[0,0]
; ALL64-NEXT: vaddps %xmm1, %xmm0, %xmm0
; ALL64-NEXT: vdivps %xmm0, %xmm1, %xmm0
; ALL64-NEXT: retq
Modified: llvm/trunk/test/CodeGen/X86/insert-loaded-scalar.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/insert-loaded-scalar.ll?rev=352922&r1=352921&r2=352922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/insert-loaded-scalar.ll (original)
+++ llvm/trunk/test/CodeGen/X86/insert-loaded-scalar.ll Fri Feb 1 13:41:30 2019
@@ -178,15 +178,10 @@ define <2 x i64> @load64_ins_eltc_v2i64(
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; SSE-NEXT: retq
;
-; AVX1-LABEL: load64_ins_eltc_v2i64:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: load64_ins_eltc_v2i64:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpbroadcastq (%rdi), %xmm0
-; AVX2-NEXT: retq
+; AVX-LABEL: load64_ins_eltc_v2i64:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
+; AVX-NEXT: retq
%x = load i64, i64* %p
%ins = insertelement <2 x i64> undef, i64 %x, i32 1
ret <2 x i64> %ins
Modified: llvm/trunk/test/CodeGen/X86/insertelement-var-index.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/insertelement-var-index.ll?rev=352922&r1=352921&r2=352922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/insertelement-var-index.ll (original)
+++ llvm/trunk/test/CodeGen/X86/insertelement-var-index.ll Fri Feb 1 13:41:30 2019
@@ -203,15 +203,10 @@ define <2 x i64> @load_i64_v2i64(i64* %p
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; SSE-NEXT: retq
;
-; AVX1-LABEL: load_i64_v2i64:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: load_i64_v2i64:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpbroadcastq (%rdi), %xmm0
-; AVX2-NEXT: retq
+; AVX-LABEL: load_i64_v2i64:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
+; AVX-NEXT: retq
%x = load i64, i64* %p
%ins = insertelement <2 x i64> undef, i64 %x, i32 %y
ret <2 x i64> %ins
Modified: llvm/trunk/test/CodeGen/X86/oddshuffles.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/oddshuffles.ll?rev=352922&r1=352921&r2=352922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/oddshuffles.ll (original)
+++ llvm/trunk/test/CodeGen/X86/oddshuffles.ll Fri Feb 1 13:41:30 2019
@@ -1673,7 +1673,7 @@ define void @interleave_24i32_in(<24 x i
; XOP-LABEL: interleave_24i32_in:
; XOP: # %bb.0:
; XOP-NEXT: vmovupd (%rsi), %ymm0
-; XOP-NEXT: vmovupd (%rcx), %ymm1
+; XOP-NEXT: vmovups (%rcx), %ymm1
; XOP-NEXT: vmovups 16(%rcx), %xmm2
; XOP-NEXT: vmovups (%rdx), %xmm3
; XOP-NEXT: vmovups 16(%rdx), %xmm4
@@ -1744,8 +1744,8 @@ define <2 x double> @wrongorder(<4 x dou
; AVX2-LABEL: wrongorder:
; AVX2: # %bb.0:
; AVX2-NEXT: vbroadcastsd %xmm0, %ymm1
-; AVX2-NEXT: vmovapd %ymm1, 32(%rdi)
-; AVX2-NEXT: vmovapd %ymm1, (%rdi)
+; AVX2-NEXT: vmovaps %ymm1, 32(%rdi)
+; AVX2-NEXT: vmovaps %ymm1, (%rdi)
; AVX2-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
Modified: llvm/trunk/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll?rev=352922&r1=352921&r2=352922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll Fri Feb 1 13:41:30 2019
@@ -5363,7 +5363,7 @@ define void @test_mm_store_pd1(double *%
; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX1-NEXT: vmovddup %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x12,0xc0]
; X86-AVX1-NEXT: # xmm0 = xmm0[0,0]
-; X86-AVX1-NEXT: vmovapd %xmm0, (%eax) # encoding: [0xc5,0xf9,0x29,0x00]
+; X86-AVX1-NEXT: vmovaps %xmm0, (%eax) # encoding: [0xc5,0xf8,0x29,0x00]
; X86-AVX1-NEXT: retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_store_pd1:
@@ -5371,7 +5371,7 @@ define void @test_mm_store_pd1(double *%
; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512-NEXT: vmovddup %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x12,0xc0]
; X86-AVX512-NEXT: # xmm0 = xmm0[0,0]
-; X86-AVX512-NEXT: vmovapd %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x29,0x00]
+; X86-AVX512-NEXT: vmovaps %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x29,0x00]
; X86-AVX512-NEXT: retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_store_pd1:
@@ -5385,14 +5385,14 @@ define void @test_mm_store_pd1(double *%
; X64-AVX1: # %bb.0:
; X64-AVX1-NEXT: vmovddup %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x12,0xc0]
; X64-AVX1-NEXT: # xmm0 = xmm0[0,0]
-; X64-AVX1-NEXT: vmovapd %xmm0, (%rdi) # encoding: [0xc5,0xf9,0x29,0x07]
+; X64-AVX1-NEXT: vmovaps %xmm0, (%rdi) # encoding: [0xc5,0xf8,0x29,0x07]
; X64-AVX1-NEXT: retq # encoding: [0xc3]
;
; X64-AVX512-LABEL: test_mm_store_pd1:
; X64-AVX512: # %bb.0:
; X64-AVX512-NEXT: vmovddup %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x12,0xc0]
; X64-AVX512-NEXT: # xmm0 = xmm0[0,0]
-; X64-AVX512-NEXT: vmovapd %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x29,0x07]
+; X64-AVX512-NEXT: vmovaps %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x29,0x07]
; X64-AVX512-NEXT: retq # encoding: [0xc3]
%arg0 = bitcast double * %a0 to <2 x double>*
%shuf = shufflevector <2 x double> %a1, <2 x double> undef, <2 x i32> zeroinitializer
@@ -5489,7 +5489,7 @@ define void @test_mm_store1_pd(double *%
; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX1-NEXT: vmovddup %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x12,0xc0]
; X86-AVX1-NEXT: # xmm0 = xmm0[0,0]
-; X86-AVX1-NEXT: vmovapd %xmm0, (%eax) # encoding: [0xc5,0xf9,0x29,0x00]
+; X86-AVX1-NEXT: vmovaps %xmm0, (%eax) # encoding: [0xc5,0xf8,0x29,0x00]
; X86-AVX1-NEXT: retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_store1_pd:
@@ -5497,7 +5497,7 @@ define void @test_mm_store1_pd(double *%
; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512-NEXT: vmovddup %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x12,0xc0]
; X86-AVX512-NEXT: # xmm0 = xmm0[0,0]
-; X86-AVX512-NEXT: vmovapd %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x29,0x00]
+; X86-AVX512-NEXT: vmovaps %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x29,0x00]
; X86-AVX512-NEXT: retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_store1_pd:
@@ -5511,14 +5511,14 @@ define void @test_mm_store1_pd(double *%
; X64-AVX1: # %bb.0:
; X64-AVX1-NEXT: vmovddup %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x12,0xc0]
; X64-AVX1-NEXT: # xmm0 = xmm0[0,0]
-; X64-AVX1-NEXT: vmovapd %xmm0, (%rdi) # encoding: [0xc5,0xf9,0x29,0x07]
+; X64-AVX1-NEXT: vmovaps %xmm0, (%rdi) # encoding: [0xc5,0xf8,0x29,0x07]
; X64-AVX1-NEXT: retq # encoding: [0xc3]
;
; X64-AVX512-LABEL: test_mm_store1_pd:
; X64-AVX512: # %bb.0:
; X64-AVX512-NEXT: vmovddup %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x12,0xc0]
; X64-AVX512-NEXT: # xmm0 = xmm0[0,0]
-; X64-AVX512-NEXT: vmovapd %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x29,0x07]
+; X64-AVX512-NEXT: vmovaps %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x29,0x07]
; X64-AVX512-NEXT: retq # encoding: [0xc3]
%arg0 = bitcast double * %a0 to <2 x double>*
%shuf = shufflevector <2 x double> %a1, <2 x double> undef, <2 x i32> zeroinitializer
Modified: llvm/trunk/test/CodeGen/X86/subvector-broadcast.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/subvector-broadcast.ll?rev=352922&r1=352921&r2=352922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/subvector-broadcast.ll (original)
+++ llvm/trunk/test/CodeGen/X86/subvector-broadcast.ll Fri Feb 1 13:41:30 2019
@@ -1562,20 +1562,10 @@ define <4 x i32> @test_2xi32_to_4xi32_me
; X32-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; X32-NEXT: retl
;
-; X64-AVX1-LABEL: test_2xi32_to_4xi32_mem:
-; X64-AVX1: # %bb.0:
-; X64-AVX1-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
-; X64-AVX1-NEXT: retq
-;
-; X64-AVX2-LABEL: test_2xi32_to_4xi32_mem:
-; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: vpbroadcastq (%rdi), %xmm0
-; X64-AVX2-NEXT: retq
-;
-; X64-AVX512-LABEL: test_2xi32_to_4xi32_mem:
-; X64-AVX512: # %bb.0:
-; X64-AVX512-NEXT: vpbroadcastq (%rdi), %xmm0
-; X64-AVX512-NEXT: retq
+; X64-LABEL: test_2xi32_to_4xi32_mem:
+; X64: # %bb.0:
+; X64-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
+; X64-NEXT: retq
%vec = load <2 x i32>, <2 x i32>* %vp
%res = shufflevector <2 x i32> %vec, <2 x i32> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
ret <4 x i32> %res
Modified: llvm/trunk/test/CodeGen/X86/vector-shift-lshr-sub128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shift-lshr-sub128.ll?rev=352922&r1=352921&r2=352922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shift-lshr-sub128.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shift-lshr-sub128.ll Fri Feb 1 13:41:30 2019
@@ -702,8 +702,8 @@ define <2 x i8> @var_shift_v2i8(<2 x i8>
; AVX1: # %bb.0:
; AVX1-NEXT: vmovddup {{.*#+}} xmm2 = [1.2598673968951787E-321,1.2598673968951787E-321]
; AVX1-NEXT: # xmm2 = mem[0,0]
-; AVX1-NEXT: vandpd %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vandpd %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vandps %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vandps %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpsrlq %xmm1, %xmm0, %xmm2
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
; AVX1-NEXT: vpsrlq %xmm1, %xmm0, %xmm0
@@ -722,9 +722,9 @@ define <2 x i8> @var_shift_v2i8(<2 x i8>
; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vmovddup {{.*#+}} xmm2 = [1.2598673968951787E-321,1.2598673968951787E-321]
; XOPAVX1-NEXT: # xmm2 = mem[0,0]
-; XOPAVX1-NEXT: vandpd %xmm2, %xmm0, %xmm0
-; XOPAVX1-NEXT: vandpd %xmm2, %xmm1, %xmm1
-; XOPAVX1-NEXT: vxorpd %xmm2, %xmm2, %xmm2
+; XOPAVX1-NEXT: vandps %xmm2, %xmm0, %xmm0
+; XOPAVX1-NEXT: vandps %xmm2, %xmm1, %xmm1
+; XOPAVX1-NEXT: vxorps %xmm2, %xmm2, %xmm2
; XOPAVX1-NEXT: vpsubq %xmm1, %xmm2, %xmm1
; XOPAVX1-NEXT: vpshlq %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: retq
@@ -1489,7 +1489,7 @@ define <2 x i8> @splatvar_shift_v2i8(<2
; AVX1: # %bb.0:
; AVX1-NEXT: vmovddup {{.*#+}} xmm2 = [1.2598673968951787E-321,1.2598673968951787E-321]
; AVX1-NEXT: # xmm2 = mem[0,0]
-; AVX1-NEXT: vandpd %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vandps %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[0],zero,zero,zero,zero,zero,zero,zero
; AVX1-NEXT: vpsrlq %xmm1, %xmm0, %xmm2
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
@@ -1509,9 +1509,9 @@ define <2 x i8> @splatvar_shift_v2i8(<2
; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vmovddup {{.*#+}} xmm2 = [1.2598673968951787E-321,1.2598673968951787E-321]
; XOPAVX1-NEXT: # xmm2 = mem[0,0]
-; XOPAVX1-NEXT: vandpd %xmm2, %xmm0, %xmm0
+; XOPAVX1-NEXT: vandps %xmm2, %xmm0, %xmm0
; XOPAVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[0],zero,zero,zero,zero,zero,zero,zero
-; XOPAVX1-NEXT: vxorpd %xmm2, %xmm2, %xmm2
+; XOPAVX1-NEXT: vxorps %xmm2, %xmm2, %xmm2
; XOPAVX1-NEXT: vpsubq %xmm1, %xmm2, %xmm1
; XOPAVX1-NEXT: vpshlq %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: retq
Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-128-v2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-128-v2.ll?rev=352922&r1=352921&r2=352922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-128-v2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-128-v2.ll Fri Feb 1 13:41:30 2019
@@ -20,12 +20,12 @@ define <2 x i64> @shuffle_v2i64_00(<2 x
;
; AVX2-LABEL: shuffle_v2i64_00:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpbroadcastq %xmm0, %xmm0
+; AVX2-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v2i64_00:
; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpbroadcastq %xmm0, %xmm0
+; AVX512VL-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX512VL-NEXT: retq
%shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 0, i32 0>
ret <2 x i64> %shuffle
@@ -69,12 +69,12 @@ define <2 x i64> @shuffle_v2i64_22(<2 x
;
; AVX2-LABEL: shuffle_v2i64_22:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpbroadcastq %xmm1, %xmm0
+; AVX2-NEXT: vmovddup {{.*#+}} xmm0 = xmm1[0,0]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v2i64_22:
; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpbroadcastq %xmm1, %xmm0
+; AVX512VL-NEXT: vmovddup {{.*#+}} xmm0 = xmm1[0,0]
; AVX512VL-NEXT: retq
%shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 2, i32 2>
ret <2 x i64> %shuffle
@@ -1264,20 +1264,10 @@ define <2 x i64> @insert_dup_mem_v2i64(i
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; SSE-NEXT: retq
;
-; AVX1-LABEL: insert_dup_mem_v2i64:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: insert_dup_mem_v2i64:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpbroadcastq (%rdi), %xmm0
-; AVX2-NEXT: retq
-;
-; AVX512VL-LABEL: insert_dup_mem_v2i64:
-; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpbroadcastq (%rdi), %xmm0
-; AVX512VL-NEXT: retq
+; AVX-LABEL: insert_dup_mem_v2i64:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
+; AVX-NEXT: retq
%tmp = load i64, i64* %ptr, align 1
%tmp1 = insertelement <2 x i64> undef, i64 %tmp, i32 0
%tmp2 = shufflevector <2 x i64> %tmp1, <2 x i64> undef, <2 x i32> zeroinitializer
Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-128-v4.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-128-v4.ll?rev=352922&r1=352921&r2=352922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-128-v4.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-128-v4.ll Fri Feb 1 13:41:30 2019
@@ -539,9 +539,9 @@ define <4 x i32> @shuffle_v4i32_0451(<4
;
; AVX2OR512VL-LABEL: shuffle_v4i32_0451:
; AVX2OR512VL: # %bb.0:
-; AVX2OR512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
-; AVX2OR512VL-NEXT: vpbroadcastq %xmm0, %xmm0
-; AVX2OR512VL-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3]
+; AVX2OR512VL-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; AVX2OR512VL-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; AVX2OR512VL-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 4, i32 5, i32 1>
ret <4 x i32> %shuffle
@@ -595,9 +595,9 @@ define <4 x i32> @shuffle_v4i32_4015(<4
;
; AVX2OR512VL-LABEL: shuffle_v4i32_4015:
; AVX2OR512VL: # %bb.0:
-; AVX2OR512VL-NEXT: vpbroadcastq %xmm1, %xmm1
-; AVX2OR512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
-; AVX2OR512VL-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3]
+; AVX2OR512VL-NEXT: vmovddup {{.*#+}} xmm1 = xmm1[0,0]
+; AVX2OR512VL-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; AVX2OR512VL-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 4, i32 0, i32 1, i32 5>
ret <4 x i32> %shuffle
Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll?rev=352922&r1=352921&r2=352922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll Fri Feb 1 13:41:30 2019
@@ -49,7 +49,7 @@ define <4 x double> @shuffle_v4f64_0020(
; AVX1-LABEL: shuffle_v4f64_0020:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; AVX1-NEXT: vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
@@ -321,7 +321,7 @@ define <4 x double> @shuffle_v4f64_0423(
; ALL-LABEL: shuffle_v4f64_0423:
; ALL: # %bb.0:
; ALL-NEXT: vmovddup {{.*#+}} xmm1 = xmm1[0,0]
-; ALL-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3]
+; ALL-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; ALL-NEXT: retq
%shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 0, i32 4, i32 2, i32 3>
ret <4 x double> %shuffle
@@ -923,14 +923,14 @@ define <4 x i64> @shuffle_v4i64_0412(<4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpalignr {{.*#+}} xmm2 = xmm0[8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3]
+; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4i64_0412:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpbroadcastq %xmm1, %xmm1
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,1,2]
-; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
+; AVX2-NEXT: vmovddup {{.*#+}} xmm1 = xmm1[0,0]
+; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,1,2]
+; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4i64_0412:
Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-avx2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-avx2.ll?rev=352922&r1=352921&r2=352922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-avx2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-avx2.ll Fri Feb 1 13:41:30 2019
@@ -217,7 +217,7 @@ define <8 x i32> @combine_permd_as_vpbro
define <16 x i8> @combine_pshufb_as_vpbroadcastq128(<16 x i8> %a) {
; CHECK-LABEL: combine_pshufb_as_vpbroadcastq128:
; CHECK: # %bb.0:
-; CHECK-NEXT: vpbroadcastq %xmm0, %xmm0
+; CHECK-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; CHECK-NEXT: ret{{[l|q]}}
%1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7>)
ret <16 x i8> %1
@@ -648,7 +648,7 @@ define <32 x i8> @combine_pshufb_as_pack
define <16 x i8> @combine_pshufb_insertion_as_broadcast_v2i64(i64 %a0) {
; X86-LABEL: combine_pshufb_insertion_as_broadcast_v2i64:
; X86: # %bb.0:
-; X86-NEXT: vpbroadcastq {{[0-9]+}}(%esp), %xmm0
+; X86-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; X86-NEXT: retl
;
; X64-LABEL: combine_pshufb_insertion_as_broadcast_v2i64:
Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-xop.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-xop.ll?rev=352922&r1=352921&r2=352922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-xop.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-xop.ll Fri Feb 1 13:41:30 2019
@@ -234,7 +234,7 @@ define void @buildvector_v4f32_0404(floa
; X86-AVX2: # %bb.0:
; X86-AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX2-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
-; X86-AVX2-NEXT: vmovapd %xmm0, (%eax)
+; X86-AVX2-NEXT: vmovaps %xmm0, (%eax)
; X86-AVX2-NEXT: retl
;
; X64-AVX-LABEL: buildvector_v4f32_0404:
@@ -247,7 +247,7 @@ define void @buildvector_v4f32_0404(floa
; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X64-AVX2-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
-; X64-AVX2-NEXT: vmovapd %xmm0, (%rdi)
+; X64-AVX2-NEXT: vmovaps %xmm0, (%rdi)
; X64-AVX2-NEXT: retq
%v0 = insertelement <4 x float> undef, float %a, i32 0
%v1 = insertelement <4 x float> %v0, float %b, i32 1
Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-combining.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-combining.ll?rev=352922&r1=352921&r2=352922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-combining.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-combining.ll Fri Feb 1 13:41:30 2019
@@ -677,7 +677,7 @@ define <4 x i32> @combine_nested_undef_t
;
; AVX2-LABEL: combine_nested_undef_test4:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpbroadcastq %xmm0, %xmm0
+; AVX2-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX2-NEXT: retq
%1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 0, i32 4, i32 7, i32 1>
%2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 4, i32 4, i32 0, i32 3>
@@ -1044,8 +1044,8 @@ define <4 x i32> @combine_nested_undef_t
;
; AVX2-LABEL: combine_nested_undef_test21:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
-; AVX2-NEXT: vpbroadcastq %xmm0, %xmm0
+; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
+; AVX2-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX2-NEXT: retq
%1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 4, i32 1, i32 3, i32 1>
%2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 3>
@@ -1114,7 +1114,7 @@ define <4 x i32> @combine_nested_undef_t
;
; AVX2-LABEL: combine_nested_undef_test25:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpbroadcastq %xmm0, %xmm0
+; AVX2-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX2-NEXT: retq
%1 = shufflevector <4 x i32> %B, <4 x i32> %A, <4 x i32> <i32 1, i32 5, i32 2, i32 4>
%2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 3, i32 1, i32 3, i32 1>
@@ -1149,7 +1149,7 @@ define <4 x i32> @combine_nested_undef_t
;
; AVX2-LABEL: combine_nested_undef_test27:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpbroadcastq %xmm0, %xmm0
+; AVX2-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX2-NEXT: retq
%1 = shufflevector <4 x i32> %B, <4 x i32> %A, <4 x i32> <i32 2, i32 1, i32 5, i32 4>
%2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 3, i32 2>
Modified: llvm/trunk/test/CodeGen/X86/vector-trunc-math-widen.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-trunc-math-widen.ll?rev=352922&r1=352921&r2=352922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-trunc-math-widen.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-trunc-math-widen.ll Fri Feb 1 13:41:30 2019
@@ -681,21 +681,21 @@ define <16 x i8> @trunc_add_const_v16i64
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = [1.2598673968951787E-321,1.2598673968951787E-321]
; AVX1-NEXT: # xmm5 = mem[0,0]
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm1, %xmm1
; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
@@ -1518,21 +1518,21 @@ define <16 x i8> @trunc_sub_const_v16i64
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = [1.2598673968951787E-321,1.2598673968951787E-321]
; AVX1-NEXT: # xmm5 = mem[0,0]
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm1, %xmm1
; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
@@ -3119,28 +3119,28 @@ define <16 x i8> @trunc_and_v16i64_v16i8
;
; AVX1-LABEL: trunc_and_v16i64_v16i8:
; AVX1: # %bb.0:
-; AVX1-NEXT: vandpd %ymm4, %ymm0, %ymm0
-; AVX1-NEXT: vandpd %ymm5, %ymm1, %ymm1
-; AVX1-NEXT: vandpd %ymm6, %ymm2, %ymm2
-; AVX1-NEXT: vandpd %ymm7, %ymm3, %ymm3
+; AVX1-NEXT: vandps %ymm4, %ymm0, %ymm0
+; AVX1-NEXT: vandps %ymm5, %ymm1, %ymm1
+; AVX1-NEXT: vandps %ymm6, %ymm2, %ymm2
+; AVX1-NEXT: vandps %ymm7, %ymm3, %ymm3
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = [1.2598673968951787E-321,1.2598673968951787E-321]
; AVX1-NEXT: # xmm5 = mem[0,0]
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm1, %xmm1
; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
@@ -3518,21 +3518,21 @@ define <16 x i8> @trunc_and_const_v16i64
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = [1.2598673968951787E-321,1.2598673968951787E-321]
; AVX1-NEXT: # xmm5 = mem[0,0]
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm1, %xmm1
; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
@@ -3915,28 +3915,28 @@ define <16 x i8> @trunc_xor_v16i64_v16i8
;
; AVX1-LABEL: trunc_xor_v16i64_v16i8:
; AVX1: # %bb.0:
-; AVX1-NEXT: vxorpd %ymm4, %ymm0, %ymm0
-; AVX1-NEXT: vxorpd %ymm5, %ymm1, %ymm1
-; AVX1-NEXT: vxorpd %ymm6, %ymm2, %ymm2
-; AVX1-NEXT: vxorpd %ymm7, %ymm3, %ymm3
+; AVX1-NEXT: vxorps %ymm4, %ymm0, %ymm0
+; AVX1-NEXT: vxorps %ymm5, %ymm1, %ymm1
+; AVX1-NEXT: vxorps %ymm6, %ymm2, %ymm2
+; AVX1-NEXT: vxorps %ymm7, %ymm3, %ymm3
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = [1.2598673968951787E-321,1.2598673968951787E-321]
; AVX1-NEXT: # xmm5 = mem[0,0]
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm1, %xmm1
; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
@@ -4314,21 +4314,21 @@ define <16 x i8> @trunc_xor_const_v16i64
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = [1.2598673968951787E-321,1.2598673968951787E-321]
; AVX1-NEXT: # xmm5 = mem[0,0]
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm1, %xmm1
; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
@@ -4711,28 +4711,28 @@ define <16 x i8> @trunc_or_v16i64_v16i8(
;
; AVX1-LABEL: trunc_or_v16i64_v16i8:
; AVX1: # %bb.0:
-; AVX1-NEXT: vorpd %ymm4, %ymm0, %ymm0
-; AVX1-NEXT: vorpd %ymm5, %ymm1, %ymm1
-; AVX1-NEXT: vorpd %ymm6, %ymm2, %ymm2
-; AVX1-NEXT: vorpd %ymm7, %ymm3, %ymm3
+; AVX1-NEXT: vorps %ymm4, %ymm0, %ymm0
+; AVX1-NEXT: vorps %ymm5, %ymm1, %ymm1
+; AVX1-NEXT: vorps %ymm6, %ymm2, %ymm2
+; AVX1-NEXT: vorps %ymm7, %ymm3, %ymm3
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = [1.2598673968951787E-321,1.2598673968951787E-321]
; AVX1-NEXT: # xmm5 = mem[0,0]
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm1, %xmm1
; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
@@ -5110,21 +5110,21 @@ define <16 x i8> @trunc_or_const_v16i64_
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = [1.2598673968951787E-321,1.2598673968951787E-321]
; AVX1-NEXT: # xmm5 = mem[0,0]
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm1, %xmm1
; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
Modified: llvm/trunk/test/CodeGen/X86/vector-trunc-math.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-trunc-math.ll?rev=352922&r1=352921&r2=352922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-trunc-math.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-trunc-math.ll Fri Feb 1 13:41:30 2019
@@ -681,21 +681,21 @@ define <16 x i8> @trunc_add_const_v16i64
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = [1.2598673968951787E-321,1.2598673968951787E-321]
; AVX1-NEXT: # xmm5 = mem[0,0]
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm1, %xmm1
; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
@@ -1518,21 +1518,21 @@ define <16 x i8> @trunc_sub_const_v16i64
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = [1.2598673968951787E-321,1.2598673968951787E-321]
; AVX1-NEXT: # xmm5 = mem[0,0]
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm1, %xmm1
; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
@@ -3119,28 +3119,28 @@ define <16 x i8> @trunc_and_v16i64_v16i8
;
; AVX1-LABEL: trunc_and_v16i64_v16i8:
; AVX1: # %bb.0:
-; AVX1-NEXT: vandpd %ymm4, %ymm0, %ymm0
-; AVX1-NEXT: vandpd %ymm5, %ymm1, %ymm1
-; AVX1-NEXT: vandpd %ymm6, %ymm2, %ymm2
-; AVX1-NEXT: vandpd %ymm7, %ymm3, %ymm3
+; AVX1-NEXT: vandps %ymm4, %ymm0, %ymm0
+; AVX1-NEXT: vandps %ymm5, %ymm1, %ymm1
+; AVX1-NEXT: vandps %ymm6, %ymm2, %ymm2
+; AVX1-NEXT: vandps %ymm7, %ymm3, %ymm3
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = [1.2598673968951787E-321,1.2598673968951787E-321]
; AVX1-NEXT: # xmm5 = mem[0,0]
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm1, %xmm1
; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
@@ -3518,21 +3518,21 @@ define <16 x i8> @trunc_and_const_v16i64
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = [1.2598673968951787E-321,1.2598673968951787E-321]
; AVX1-NEXT: # xmm5 = mem[0,0]
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm1, %xmm1
; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
@@ -3915,28 +3915,28 @@ define <16 x i8> @trunc_xor_v16i64_v16i8
;
; AVX1-LABEL: trunc_xor_v16i64_v16i8:
; AVX1: # %bb.0:
-; AVX1-NEXT: vxorpd %ymm4, %ymm0, %ymm0
-; AVX1-NEXT: vxorpd %ymm5, %ymm1, %ymm1
-; AVX1-NEXT: vxorpd %ymm6, %ymm2, %ymm2
-; AVX1-NEXT: vxorpd %ymm7, %ymm3, %ymm3
+; AVX1-NEXT: vxorps %ymm4, %ymm0, %ymm0
+; AVX1-NEXT: vxorps %ymm5, %ymm1, %ymm1
+; AVX1-NEXT: vxorps %ymm6, %ymm2, %ymm2
+; AVX1-NEXT: vxorps %ymm7, %ymm3, %ymm3
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = [1.2598673968951787E-321,1.2598673968951787E-321]
; AVX1-NEXT: # xmm5 = mem[0,0]
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm1, %xmm1
; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
@@ -4314,21 +4314,21 @@ define <16 x i8> @trunc_xor_const_v16i64
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = [1.2598673968951787E-321,1.2598673968951787E-321]
; AVX1-NEXT: # xmm5 = mem[0,0]
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm1, %xmm1
; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
@@ -4711,28 +4711,28 @@ define <16 x i8> @trunc_or_v16i64_v16i8(
;
; AVX1-LABEL: trunc_or_v16i64_v16i8:
; AVX1: # %bb.0:
-; AVX1-NEXT: vorpd %ymm4, %ymm0, %ymm0
-; AVX1-NEXT: vorpd %ymm5, %ymm1, %ymm1
-; AVX1-NEXT: vorpd %ymm6, %ymm2, %ymm2
-; AVX1-NEXT: vorpd %ymm7, %ymm3, %ymm3
+; AVX1-NEXT: vorps %ymm4, %ymm0, %ymm0
+; AVX1-NEXT: vorps %ymm5, %ymm1, %ymm1
+; AVX1-NEXT: vorps %ymm6, %ymm2, %ymm2
+; AVX1-NEXT: vorps %ymm7, %ymm3, %ymm3
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = [1.2598673968951787E-321,1.2598673968951787E-321]
; AVX1-NEXT: # xmm5 = mem[0,0]
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm1, %xmm1
; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
@@ -5110,21 +5110,21 @@ define <16 x i8> @trunc_or_const_v16i64_
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = [1.2598673968951787E-321,1.2598673968951787E-321]
; AVX1-NEXT: # xmm5 = mem[0,0]
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm1, %xmm1
; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
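The vandpd -> vandps flips in these trunc tests fall out of the new
mapping: once VMOVDDUP sits in the replaceable-instrs tables, the
execution-domain fix pass no longer pins the splatted constant-pool
load (and every logic op chained off it) to the packed-double domain,
so the whole chain collapses to the packed-single forms. The
odd-looking splat constant is just the <2 x i64> <i64 255, i64 255>
truncation mask reinterpreted as doubles: 255 * 2^-1074 is the
denormal 1.2598673968951787E-321. A minimal sketch of the IR pattern
these hunks exercise (the checked-in bodies also fold in the
and/xor/or operands and store the result, so take the exact signature
as an assumption, not the verbatim test):

  define <8 x i8> @trunc_v8i64_v8i8(<8 x i64> %a) {
    ; On AVX1 this lowers as: mask each i64 lane with 255 (the vmovddup
    ; splat load shown in the hunks above), then narrow the lanes with
    ; vpackusdw / vpackuswb.
    %t = trunc <8 x i64> %a to <8 x i8>
    ret <8 x i8> %t
  }
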
Modified: llvm/trunk/test/CodeGen/X86/vector-trunc-widen.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-trunc-widen.ll?rev=352922&r1=352921&r2=352922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-trunc-widen.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-trunc-widen.ll Fri Feb 1 13:41:30 2019
@@ -288,12 +288,12 @@ define void @trunc8i64_8i8(<8 x i64> %a)
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vmovddup {{.*#+}} xmm3 = [1.2598673968951787E-321,1.2598673968951787E-321]
; AVX1-NEXT: # xmm3 = mem[0,0]
-; AVX1-NEXT: vandpd %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vandpd %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vandpd %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vandpd %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm0, %xmm0, %xmm0
Modified: llvm/trunk/test/CodeGen/X86/vector-trunc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-trunc.ll?rev=352922&r1=352921&r2=352922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-trunc.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-trunc.ll Fri Feb 1 13:41:30 2019
@@ -288,12 +288,12 @@ define void @trunc8i64_8i8(<8 x i64> %a)
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vmovddup {{.*#+}} xmm3 = [1.2598673968951787E-321,1.2598673968951787E-321]
; AVX1-NEXT: # xmm3 = mem[0,0]
-; AVX1-NEXT: vandpd %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vandpd %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vandpd %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vandpd %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm0, %xmm0, %xmm0
Modified: llvm/trunk/test/CodeGen/X86/widened-broadcast.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/widened-broadcast.ll?rev=352922&r1=352921&r2=352922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/widened-broadcast.ll (original)
+++ llvm/trunk/test/CodeGen/X86/widened-broadcast.ll Fri Feb 1 13:41:30 2019
@@ -101,12 +101,12 @@ define <4 x i32> @load_splat_4i32_4i32_0
;
; AVX2-LABEL: load_splat_4i32_4i32_0101:
; AVX2: # %bb.0: # %entry
-; AVX2-NEXT: vpbroadcastq (%rdi), %xmm0
+; AVX2-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_splat_4i32_4i32_0101:
; AVX512: # %bb.0: # %entry
-; AVX512-NEXT: vpbroadcastq (%rdi), %xmm0
+; AVX512-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX512-NEXT: retq
entry:
%ld = load <4 x i32>, <4 x i32>* %ptr
@@ -198,12 +198,12 @@ define <8 x i16> @load_splat_8i16_8i16_0
;
; AVX2-LABEL: load_splat_8i16_8i16_01230123:
; AVX2: # %bb.0: # %entry
-; AVX2-NEXT: vpbroadcastq (%rdi), %xmm0
+; AVX2-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_splat_8i16_8i16_01230123:
; AVX512: # %bb.0: # %entry
-; AVX512-NEXT: vpbroadcastq (%rdi), %xmm0
+; AVX512-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX512-NEXT: retq
entry:
%ld = load <8 x i16>, <8 x i16>* %ptr
@@ -368,12 +368,12 @@ define <16 x i8> @load_splat_16i8_16i8_0
;
; AVX2-LABEL: load_splat_16i8_16i8_0123456701234567:
; AVX2: # %bb.0: # %entry
-; AVX2-NEXT: vpbroadcastq (%rdi), %xmm0
+; AVX2-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_splat_16i8_16i8_0123456701234567:
; AVX512: # %bb.0: # %entry
-; AVX512-NEXT: vpbroadcastq (%rdi), %xmm0
+; AVX512-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX512-NEXT: retq
entry:
%ld = load <16 x i8>, <16 x i8>* %ptr
@@ -580,20 +580,10 @@ define <4 x i32> @load_splat_4i32_2i32_0
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; SSE-NEXT: retq
;
-; AVX1-LABEL: load_splat_4i32_2i32_0101:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: load_splat_4i32_2i32_0101:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpbroadcastq (%rdi), %xmm0
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: load_splat_4i32_2i32_0101:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpbroadcastq (%rdi), %xmm0
-; AVX512-NEXT: retq
+; AVX-LABEL: load_splat_4i32_2i32_0101:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
+; AVX-NEXT: retq
%vec = load <2 x i32>, <2 x i32>* %vp
%res = shufflevector <2 x i32> %vec, <2 x i32> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
ret <4 x i32> %res
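For reference, the widened-broadcast.ll pattern in standalone form: a
64-bit splat load for which vpbroadcastq (integer domain) and vmovddup
(floating-point domain) are interchangeable lowerings, since both
duplicate the same 8 loaded bytes across the register. This is
reconstructed from the visible CHECK lines and IR fragments, so treat
the body as a sketch rather than the verbatim test:

  define <4 x i32> @load_splat_4i32_4i32_0101(<4 x i32>* %ptr) {
  entry:
    ; Lanes <0,1,0,1> of a <4 x i32> are exactly a broadcast of the
    ; low qword, i.e. vmovddup / vpbroadcastq from (%rdi).
    %ld = load <4 x i32>, <4 x i32>* %ptr
    %res = shufflevector <4 x i32> %ld, <4 x i32> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
    ret <4 x i32> %res
  }

With no domain-constraining consumers, the domain pass now collapses
the broadcast to the FP-domain form, which is presumably why the AVX2
and AVX512 checks above flip from vpbroadcastq to vmovddup.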