[llvm] 649b149 - [X86] canonicalizeShuffleWithBinOps - allow merging shuffles with INSERT_SUBVECTOR as well as generic target shuffles.

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Thu Feb 2 05:56:04 PST 2023


Author: Simon Pilgrim
Date: 2023-02-02T13:55:56Z
New Revision: 649b14928a67e016f3e01ac46499aaf1824c2d09

URL: https://github.com/llvm/llvm-project/commit/649b14928a67e016f3e01ac46499aaf1824c2d09
DIFF: https://github.com/llvm/llvm-project/commit/649b14928a67e016f3e01ac46499aaf1824c2d09.diff

LOG: [X86] canonicalizeShuffleWithBinOps - allow merging shuffles with INSERT_SUBVECTOR as well as generic target shuffles.

We can probably expand this to more faux shuffles as time goes on.
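
For context, here is a minimal IR sketch of the pattern this unblocks. The function below is illustrative only (an assumed, simplified shape in the spirit of the widen_fadd_v2f32_v8f32 test, not copied from the test files): four v2f32 fadds are concatenated into a v8f32 store, so on AVX the vector fadd operands end up as one-use INSERT_SUBVECTOR concatenations with an interleaving shuffle on top. canonicalizeShuffleWithBinOps can now move that shuffle above the binop, letting the whole sequence fold to a wide load plus vaddps, as the widen_fadd.ll diff below shows.

  ; Illustrative IR only: an assumed, simplified shape, not copied from widen_fadd.ll.
  define void @widen_fadd_v2f32_v8f32_sketch(ptr %a0, ptr %b0, ptr %c0) {
    %pa1 = getelementptr inbounds float, ptr %a0, i64 2
    %pa2 = getelementptr inbounds float, ptr %a0, i64 4
    %pa3 = getelementptr inbounds float, ptr %a0, i64 6
    %pb1 = getelementptr inbounds float, ptr %b0, i64 2
    %pb2 = getelementptr inbounds float, ptr %b0, i64 4
    %pb3 = getelementptr inbounds float, ptr %b0, i64 6
    %a0v = load <2 x float>, ptr %a0, align 4
    %a1v = load <2 x float>, ptr %pa1, align 4
    %a2v = load <2 x float>, ptr %pa2, align 4
    %a3v = load <2 x float>, ptr %pa3, align 4
    %b0v = load <2 x float>, ptr %b0, align 4
    %b1v = load <2 x float>, ptr %pb1, align 4
    %b2v = load <2 x float>, ptr %pb2, align 4
    %b3v = load <2 x float>, ptr %pb3, align 4
    %add0 = fadd <2 x float> %a0v, %b0v
    %add1 = fadd <2 x float> %a1v, %b1v
    %add2 = fadd <2 x float> %a2v, %b2v
    %add3 = fadd <2 x float> %a3v, %b3v
    ; Concatenate the four v2f32 results into a v8f32. Roughly speaking, on AVX
    ; the 128-bit halves become INSERT_SUBVECTOR nodes feeding the vector fadds,
    ; with a final shuffle on top; since that shuffle can now be moved above the
    ; binop, the sequence simplifies to a single vmovups + vaddps.
    %lo = shufflevector <2 x float> %add0, <2 x float> %add1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
    %hi = shufflevector <2 x float> %add2, <2 x float> %add3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
    %cat = shufflevector <4 x float> %lo, <4 x float> %hi, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
    store <8 x float> %cat, ptr %c0, align 4
    ret void
  }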

Added: 
    

Modified: 
    llvm/lib/Target/X86/X86ISelLowering.cpp
    llvm/test/CodeGen/X86/widen_fadd.ll
    llvm/test/CodeGen/X86/widen_fdiv.ll
    llvm/test/CodeGen/X86/widen_fmul.ll
    llvm/test/CodeGen/X86/widen_fsub.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 2d3e8e6e610a..ba4cb403a9e6 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -40677,6 +40677,7 @@ static SDValue canonicalizeShuffleWithBinOps(SDValue N, SelectionDAG &DAG,
            ISD::isBuildVectorAllZeros(Op.getNode()) ||
            ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) ||
            ISD::isBuildVectorOfConstantFPSDNodes(Op.getNode()) ||
+           (Op.getOpcode() == ISD::INSERT_SUBVECTOR && Op->hasOneUse()) ||
            (isTargetShuffle(Op.getOpcode()) && Op->hasOneUse()) ||
            (FoldLoad && isShuffleFoldableLoad(Op)) ||
            DAG.isSplatValue(Op, /*AllowUndefs*/ false);

diff --git a/llvm/test/CodeGen/X86/widen_fadd.ll b/llvm/test/CodeGen/X86/widen_fadd.ll
index e910c245ff2f..96ea5a6aff4d 100644
--- a/llvm/test/CodeGen/X86/widen_fadd.ll
+++ b/llvm/test/CodeGen/X86/widen_fadd.ll
@@ -67,42 +67,16 @@ define void @widen_fadd_v2f32_v8f32(ptr %a0, ptr %b0, ptr %c0) {
 ;
 ; AVX1OR2-LABEL: widen_fadd_v2f32_v8f32:
 ; AVX1OR2:       # %bb.0:
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm2 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm3 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm5 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm6 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm7 = mem[0],zero
-; AVX1OR2-NEXT:    vinsertf128 $1, %xmm7, %ymm5, %ymm5
-; AVX1OR2-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
-; AVX1OR2-NEXT:    vaddps %ymm5, %ymm1, %ymm1
-; AVX1OR2-NEXT:    vinsertf128 $1, %xmm6, %ymm4, %ymm3
-; AVX1OR2-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX1OR2-NEXT:    vaddps %ymm3, %ymm0, %ymm0
-; AVX1OR2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX1OR2-NEXT:    vmovups (%rdi), %ymm0
+; AVX1OR2-NEXT:    vaddps (%rsi), %ymm0, %ymm0
 ; AVX1OR2-NEXT:    vmovups %ymm0, (%rdx)
 ; AVX1OR2-NEXT:    vzeroupper
 ; AVX1OR2-NEXT:    retq
 ;
 ; AVX512F-LABEL: widen_fadd_v2f32_v8f32:
 ; AVX512F:       # %bb.0:
-; AVX512F-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX512F-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; AVX512F-NEXT:    vmovsd {{.*#+}} xmm2 = mem[0],zero
-; AVX512F-NEXT:    vmovsd {{.*#+}} xmm3 = mem[0],zero
-; AVX512F-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX512F-NEXT:    vmovsd {{.*#+}} xmm5 = mem[0],zero
-; AVX512F-NEXT:    vmovsd {{.*#+}} xmm6 = mem[0],zero
-; AVX512F-NEXT:    vmovsd {{.*#+}} xmm7 = mem[0],zero
-; AVX512F-NEXT:    vinsertf128 $1, %xmm7, %ymm5, %ymm5
-; AVX512F-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
-; AVX512F-NEXT:    vaddps %ymm5, %ymm1, %ymm1
-; AVX512F-NEXT:    vinsertf128 $1, %xmm6, %ymm4, %ymm3
-; AVX512F-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX512F-NEXT:    vaddps %ymm3, %ymm0, %ymm0
-; AVX512F-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX512F-NEXT:    vmovups (%rdi), %ymm0
+; AVX512F-NEXT:    vaddps (%rsi), %ymm0, %ymm0
 ; AVX512F-NEXT:    vmovups %ymm0, (%rdx)
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
@@ -196,38 +170,12 @@ define void @widen_fadd_v2f32_v16f32(ptr %a0, ptr %b0, ptr %c0) {
 ;
 ; AVX1OR2-LABEL: widen_fadd_v2f32_v16f32:
 ; AVX1OR2:       # %bb.0:
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm2 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm3 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm5 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm6 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm7 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm8 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm9 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm10 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm11 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm12 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm13 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm14 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm15 = mem[0],zero
-; AVX1OR2-NEXT:    vinsertf128 $1, %xmm7, %ymm5, %ymm5
-; AVX1OR2-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
-; AVX1OR2-NEXT:    vaddps %ymm5, %ymm1, %ymm1
-; AVX1OR2-NEXT:    vinsertf128 $1, %xmm6, %ymm4, %ymm3
-; AVX1OR2-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX1OR2-NEXT:    vaddps %ymm3, %ymm0, %ymm0
-; AVX1OR2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX1OR2-NEXT:    vmovups (%rdi), %ymm0
+; AVX1OR2-NEXT:    vmovups 32(%rdi), %ymm1
+; AVX1OR2-NEXT:    vaddps (%rsi), %ymm0, %ymm0
+; AVX1OR2-NEXT:    vaddps 32(%rsi), %ymm1, %ymm1
 ; AVX1OR2-NEXT:    vmovups %ymm0, (%rdx)
-; AVX1OR2-NEXT:    vinsertf128 $1, %xmm15, %ymm11, %ymm0
-; AVX1OR2-NEXT:    vinsertf128 $1, %xmm14, %ymm10, %ymm1
-; AVX1OR2-NEXT:    vaddps %ymm0, %ymm1, %ymm0
-; AVX1OR2-NEXT:    vinsertf128 $1, %xmm13, %ymm9, %ymm1
-; AVX1OR2-NEXT:    vinsertf128 $1, %xmm12, %ymm8, %ymm2
-; AVX1OR2-NEXT:    vaddps %ymm1, %ymm2, %ymm1
-; AVX1OR2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX1OR2-NEXT:    vmovups %ymm0, 32(%rdx)
+; AVX1OR2-NEXT:    vmovups %ymm1, 32(%rdx)
 ; AVX1OR2-NEXT:    vzeroupper
 ; AVX1OR2-NEXT:    retq
 ;

diff --git a/llvm/test/CodeGen/X86/widen_fdiv.ll b/llvm/test/CodeGen/X86/widen_fdiv.ll
index b2007c19dc0f..e4c9278478a5 100644
--- a/llvm/test/CodeGen/X86/widen_fdiv.ll
+++ b/llvm/test/CodeGen/X86/widen_fdiv.ll
@@ -67,42 +67,16 @@ define void @widen_fdiv_v2f32_v8f32(ptr %a0, ptr %b0, ptr %c0) {
 ;
 ; AVX1OR2-LABEL: widen_fdiv_v2f32_v8f32:
 ; AVX1OR2:       # %bb.0:
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm2 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm3 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm5 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm6 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm7 = mem[0],zero
-; AVX1OR2-NEXT:    vinsertf128 $1, %xmm7, %ymm5, %ymm5
-; AVX1OR2-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
-; AVX1OR2-NEXT:    vdivps %ymm5, %ymm1, %ymm1
-; AVX1OR2-NEXT:    vinsertf128 $1, %xmm6, %ymm4, %ymm3
-; AVX1OR2-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX1OR2-NEXT:    vdivps %ymm3, %ymm0, %ymm0
-; AVX1OR2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX1OR2-NEXT:    vmovups (%rdi), %ymm0
+; AVX1OR2-NEXT:    vdivps (%rsi), %ymm0, %ymm0
 ; AVX1OR2-NEXT:    vmovups %ymm0, (%rdx)
 ; AVX1OR2-NEXT:    vzeroupper
 ; AVX1OR2-NEXT:    retq
 ;
 ; AVX512F-LABEL: widen_fdiv_v2f32_v8f32:
 ; AVX512F:       # %bb.0:
-; AVX512F-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX512F-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; AVX512F-NEXT:    vmovsd {{.*#+}} xmm2 = mem[0],zero
-; AVX512F-NEXT:    vmovsd {{.*#+}} xmm3 = mem[0],zero
-; AVX512F-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX512F-NEXT:    vmovsd {{.*#+}} xmm5 = mem[0],zero
-; AVX512F-NEXT:    vmovsd {{.*#+}} xmm6 = mem[0],zero
-; AVX512F-NEXT:    vmovsd {{.*#+}} xmm7 = mem[0],zero
-; AVX512F-NEXT:    vinsertf128 $1, %xmm7, %ymm5, %ymm5
-; AVX512F-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
-; AVX512F-NEXT:    vdivps %ymm5, %ymm1, %ymm1
-; AVX512F-NEXT:    vinsertf128 $1, %xmm6, %ymm4, %ymm3
-; AVX512F-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX512F-NEXT:    vdivps %ymm3, %ymm0, %ymm0
-; AVX512F-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX512F-NEXT:    vmovups (%rdi), %ymm0
+; AVX512F-NEXT:    vdivps (%rsi), %ymm0, %ymm0
 ; AVX512F-NEXT:    vmovups %ymm0, (%rdx)
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
@@ -196,38 +170,12 @@ define void @widen_fdiv_v2f32_v16f32(ptr %a0, ptr %b0, ptr %c0) {
 ;
 ; AVX1OR2-LABEL: widen_fdiv_v2f32_v16f32:
 ; AVX1OR2:       # %bb.0:
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm2 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm3 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm5 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm6 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm7 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm8 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm9 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm10 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm11 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm12 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm13 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm14 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm15 = mem[0],zero
-; AVX1OR2-NEXT:    vinsertf128 $1, %xmm7, %ymm5, %ymm5
-; AVX1OR2-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
-; AVX1OR2-NEXT:    vdivps %ymm5, %ymm1, %ymm1
-; AVX1OR2-NEXT:    vinsertf128 $1, %xmm6, %ymm4, %ymm3
-; AVX1OR2-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX1OR2-NEXT:    vdivps %ymm3, %ymm0, %ymm0
-; AVX1OR2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX1OR2-NEXT:    vmovups (%rdi), %ymm0
+; AVX1OR2-NEXT:    vmovups 32(%rdi), %ymm1
+; AVX1OR2-NEXT:    vdivps (%rsi), %ymm0, %ymm0
+; AVX1OR2-NEXT:    vdivps 32(%rsi), %ymm1, %ymm1
 ; AVX1OR2-NEXT:    vmovups %ymm0, (%rdx)
-; AVX1OR2-NEXT:    vinsertf128 $1, %xmm15, %ymm11, %ymm0
-; AVX1OR2-NEXT:    vinsertf128 $1, %xmm14, %ymm10, %ymm1
-; AVX1OR2-NEXT:    vdivps %ymm0, %ymm1, %ymm0
-; AVX1OR2-NEXT:    vinsertf128 $1, %xmm13, %ymm9, %ymm1
-; AVX1OR2-NEXT:    vinsertf128 $1, %xmm12, %ymm8, %ymm2
-; AVX1OR2-NEXT:    vdivps %ymm1, %ymm2, %ymm1
-; AVX1OR2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX1OR2-NEXT:    vmovups %ymm0, 32(%rdx)
+; AVX1OR2-NEXT:    vmovups %ymm1, 32(%rdx)
 ; AVX1OR2-NEXT:    vzeroupper
 ; AVX1OR2-NEXT:    retq
 ;

diff --git a/llvm/test/CodeGen/X86/widen_fmul.ll b/llvm/test/CodeGen/X86/widen_fmul.ll
index e5bb954a01ee..4c6d5e4001e7 100644
--- a/llvm/test/CodeGen/X86/widen_fmul.ll
+++ b/llvm/test/CodeGen/X86/widen_fmul.ll
@@ -67,42 +67,16 @@ define void @widen_fmul_v2f32_v8f32(ptr %a0, ptr %b0, ptr %c0) {
 ;
 ; AVX1OR2-LABEL: widen_fmul_v2f32_v8f32:
 ; AVX1OR2:       # %bb.0:
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm2 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm3 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm5 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm6 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm7 = mem[0],zero
-; AVX1OR2-NEXT:    vinsertf128 $1, %xmm7, %ymm5, %ymm5
-; AVX1OR2-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
-; AVX1OR2-NEXT:    vmulps %ymm5, %ymm1, %ymm1
-; AVX1OR2-NEXT:    vinsertf128 $1, %xmm6, %ymm4, %ymm3
-; AVX1OR2-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX1OR2-NEXT:    vmulps %ymm3, %ymm0, %ymm0
-; AVX1OR2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX1OR2-NEXT:    vmovups (%rdi), %ymm0
+; AVX1OR2-NEXT:    vmulps (%rsi), %ymm0, %ymm0
 ; AVX1OR2-NEXT:    vmovups %ymm0, (%rdx)
 ; AVX1OR2-NEXT:    vzeroupper
 ; AVX1OR2-NEXT:    retq
 ;
 ; AVX512F-LABEL: widen_fmul_v2f32_v8f32:
 ; AVX512F:       # %bb.0:
-; AVX512F-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX512F-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; AVX512F-NEXT:    vmovsd {{.*#+}} xmm2 = mem[0],zero
-; AVX512F-NEXT:    vmovsd {{.*#+}} xmm3 = mem[0],zero
-; AVX512F-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX512F-NEXT:    vmovsd {{.*#+}} xmm5 = mem[0],zero
-; AVX512F-NEXT:    vmovsd {{.*#+}} xmm6 = mem[0],zero
-; AVX512F-NEXT:    vmovsd {{.*#+}} xmm7 = mem[0],zero
-; AVX512F-NEXT:    vinsertf128 $1, %xmm7, %ymm5, %ymm5
-; AVX512F-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
-; AVX512F-NEXT:    vmulps %ymm5, %ymm1, %ymm1
-; AVX512F-NEXT:    vinsertf128 $1, %xmm6, %ymm4, %ymm3
-; AVX512F-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX512F-NEXT:    vmulps %ymm3, %ymm0, %ymm0
-; AVX512F-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX512F-NEXT:    vmovups (%rdi), %ymm0
+; AVX512F-NEXT:    vmulps (%rsi), %ymm0, %ymm0
 ; AVX512F-NEXT:    vmovups %ymm0, (%rdx)
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
@@ -196,38 +170,12 @@ define void @widen_fmul_v2f32_v16f32(ptr %a0, ptr %b0, ptr %c0) {
 ;
 ; AVX1OR2-LABEL: widen_fmul_v2f32_v16f32:
 ; AVX1OR2:       # %bb.0:
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm2 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm3 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm5 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm6 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm7 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm8 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm9 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm10 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm11 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm12 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm13 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm14 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm15 = mem[0],zero
-; AVX1OR2-NEXT:    vinsertf128 $1, %xmm7, %ymm5, %ymm5
-; AVX1OR2-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
-; AVX1OR2-NEXT:    vmulps %ymm5, %ymm1, %ymm1
-; AVX1OR2-NEXT:    vinsertf128 $1, %xmm6, %ymm4, %ymm3
-; AVX1OR2-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX1OR2-NEXT:    vmulps %ymm3, %ymm0, %ymm0
-; AVX1OR2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX1OR2-NEXT:    vmovups (%rdi), %ymm0
+; AVX1OR2-NEXT:    vmovups 32(%rdi), %ymm1
+; AVX1OR2-NEXT:    vmulps (%rsi), %ymm0, %ymm0
+; AVX1OR2-NEXT:    vmulps 32(%rsi), %ymm1, %ymm1
 ; AVX1OR2-NEXT:    vmovups %ymm0, (%rdx)
-; AVX1OR2-NEXT:    vinsertf128 $1, %xmm15, %ymm11, %ymm0
-; AVX1OR2-NEXT:    vinsertf128 $1, %xmm14, %ymm10, %ymm1
-; AVX1OR2-NEXT:    vmulps %ymm0, %ymm1, %ymm0
-; AVX1OR2-NEXT:    vinsertf128 $1, %xmm13, %ymm9, %ymm1
-; AVX1OR2-NEXT:    vinsertf128 $1, %xmm12, %ymm8, %ymm2
-; AVX1OR2-NEXT:    vmulps %ymm1, %ymm2, %ymm1
-; AVX1OR2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX1OR2-NEXT:    vmovups %ymm0, 32(%rdx)
+; AVX1OR2-NEXT:    vmovups %ymm1, 32(%rdx)
 ; AVX1OR2-NEXT:    vzeroupper
 ; AVX1OR2-NEXT:    retq
 ;

diff --git a/llvm/test/CodeGen/X86/widen_fsub.ll b/llvm/test/CodeGen/X86/widen_fsub.ll
index 38ee83a545b7..91645c3329a5 100644
--- a/llvm/test/CodeGen/X86/widen_fsub.ll
+++ b/llvm/test/CodeGen/X86/widen_fsub.ll
@@ -67,42 +67,16 @@ define void @widen_fsub_v2f32_v8f32(ptr %a0, ptr %b0, ptr %c0) {
 ;
 ; AVX1OR2-LABEL: widen_fsub_v2f32_v8f32:
 ; AVX1OR2:       # %bb.0:
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm2 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm3 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm5 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm6 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm7 = mem[0],zero
-; AVX1OR2-NEXT:    vinsertf128 $1, %xmm7, %ymm5, %ymm5
-; AVX1OR2-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
-; AVX1OR2-NEXT:    vsubps %ymm5, %ymm1, %ymm1
-; AVX1OR2-NEXT:    vinsertf128 $1, %xmm6, %ymm4, %ymm3
-; AVX1OR2-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX1OR2-NEXT:    vsubps %ymm3, %ymm0, %ymm0
-; AVX1OR2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX1OR2-NEXT:    vmovups (%rdi), %ymm0
+; AVX1OR2-NEXT:    vsubps (%rsi), %ymm0, %ymm0
 ; AVX1OR2-NEXT:    vmovups %ymm0, (%rdx)
 ; AVX1OR2-NEXT:    vzeroupper
 ; AVX1OR2-NEXT:    retq
 ;
 ; AVX512F-LABEL: widen_fsub_v2f32_v8f32:
 ; AVX512F:       # %bb.0:
-; AVX512F-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX512F-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; AVX512F-NEXT:    vmovsd {{.*#+}} xmm2 = mem[0],zero
-; AVX512F-NEXT:    vmovsd {{.*#+}} xmm3 = mem[0],zero
-; AVX512F-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX512F-NEXT:    vmovsd {{.*#+}} xmm5 = mem[0],zero
-; AVX512F-NEXT:    vmovsd {{.*#+}} xmm6 = mem[0],zero
-; AVX512F-NEXT:    vmovsd {{.*#+}} xmm7 = mem[0],zero
-; AVX512F-NEXT:    vinsertf128 $1, %xmm7, %ymm5, %ymm5
-; AVX512F-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
-; AVX512F-NEXT:    vsubps %ymm5, %ymm1, %ymm1
-; AVX512F-NEXT:    vinsertf128 $1, %xmm6, %ymm4, %ymm3
-; AVX512F-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX512F-NEXT:    vsubps %ymm3, %ymm0, %ymm0
-; AVX512F-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX512F-NEXT:    vmovups (%rdi), %ymm0
+; AVX512F-NEXT:    vsubps (%rsi), %ymm0, %ymm0
 ; AVX512F-NEXT:    vmovups %ymm0, (%rdx)
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
@@ -196,38 +170,12 @@ define void @widen_fsub_v2f32_v16f32(ptr %a0, ptr %b0, ptr %c0) {
 ;
 ; AVX1OR2-LABEL: widen_fsub_v2f32_v16f32:
 ; AVX1OR2:       # %bb.0:
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm2 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm3 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm5 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm6 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm7 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm8 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm9 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm10 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm11 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm12 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm13 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm14 = mem[0],zero
-; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm15 = mem[0],zero
-; AVX1OR2-NEXT:    vinsertf128 $1, %xmm7, %ymm5, %ymm5
-; AVX1OR2-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
-; AVX1OR2-NEXT:    vsubps %ymm5, %ymm1, %ymm1
-; AVX1OR2-NEXT:    vinsertf128 $1, %xmm6, %ymm4, %ymm3
-; AVX1OR2-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX1OR2-NEXT:    vsubps %ymm3, %ymm0, %ymm0
-; AVX1OR2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX1OR2-NEXT:    vmovups (%rdi), %ymm0
+; AVX1OR2-NEXT:    vmovups 32(%rdi), %ymm1
+; AVX1OR2-NEXT:    vsubps (%rsi), %ymm0, %ymm0
+; AVX1OR2-NEXT:    vsubps 32(%rsi), %ymm1, %ymm1
 ; AVX1OR2-NEXT:    vmovups %ymm0, (%rdx)
-; AVX1OR2-NEXT:    vinsertf128 $1, %xmm15, %ymm11, %ymm0
-; AVX1OR2-NEXT:    vinsertf128 $1, %xmm14, %ymm10, %ymm1
-; AVX1OR2-NEXT:    vsubps %ymm0, %ymm1, %ymm0
-; AVX1OR2-NEXT:    vinsertf128 $1, %xmm13, %ymm9, %ymm1
-; AVX1OR2-NEXT:    vinsertf128 $1, %xmm12, %ymm8, %ymm2
-; AVX1OR2-NEXT:    vsubps %ymm1, %ymm2, %ymm1
-; AVX1OR2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX1OR2-NEXT:    vmovups %ymm0, 32(%rdx)
+; AVX1OR2-NEXT:    vmovups %ymm1, 32(%rdx)
 ; AVX1OR2-NEXT:    vzeroupper
 ; AVX1OR2-NEXT:    retq
 ;

