[llvm] fb91f0a - [X86] combineConcatVectorOps - add FADD/FSUB/FMUL/FDIV handling
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Thu Feb 2 05:06:26 PST 2023
Author: Simon Pilgrim
Date: 2023-02-02T13:06:01Z
New Revision: fb91f0a298492ed7db8928e18cda35228474007e
URL: https://github.com/llvm/llvm-project/commit/fb91f0a298492ed7db8928e18cda35228474007e
DIFF: https://github.com/llvm/llvm-project/commit/fb91f0a298492ed7db8928e18cda35228474007e.diff
LOG: [X86] combineConcatVectorOps - add FADD/FSUB/FMUL/FDIV handling
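Teach combineConcatVectorOps to fold a concatenation of per-subvector FADD/FSUB/FMUL/FDIV nodes into a single wider op, when the result is 256-bit (or 512-bit with useAVX512Regs()) and the inputs aren't splats, so the operand concatenations are formed instead and the arithmetic happens once at full width.

A minimal IR sketch of the shape this now catches (illustrative only; the function and value names are not taken from the test files):

  define <8 x float> @concat_fadd(<4 x float> %a0, <4 x float> %a1,
                                  <4 x float> %b0, <4 x float> %b1) {
    ; Two v4f32 fadds whose results are concatenated into v8f32...
    %f0 = fadd <4 x float> %a0, %b0
    %f1 = fadd <4 x float> %a1, %b1
    %r = shufflevector <4 x float> %f0, <4 x float> %f1,
         <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
    ; ...can instead concatenate %a0/%a1 and %b0/%b1 and do one v8f32 fadd.
    ret <8 x float> %r
  }

The widen_f*.ll diffs below show the effect: pairs of 128-bit vaddps/vsubps/vmulps/vdivps on inserted subvectors become single 256-bit ops.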
Added:
Modified:
llvm/lib/Target/X86/X86ISelLowering.cpp
llvm/test/CodeGen/X86/widen_fadd.ll
llvm/test/CodeGen/X86/widen_fdiv.ll
llvm/test/CodeGen/X86/widen_fmul.ll
llvm/test/CodeGen/X86/widen_fsub.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 415064433089c..2d3e8e6e610aa 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -55359,6 +55359,20 @@ static SDValue combineConcatVectorOps(const SDLoc &DL, MVT VT,
ConcatSubOperand(VT, Ops, 1), Op0.getOperand(2));
}
break;
+ case ISD::FADD:
+ case ISD::FSUB:
+ case ISD::FMUL:
+ case ISD::FDIV:
+ if (!IsSplat && (VT.is256BitVector() ||
+ (VT.is512BitVector() && Subtarget.useAVX512Regs()))) {
+ MVT SrcVT = Op0.getOperand(0).getSimpleValueType();
+ SrcVT = MVT::getVectorVT(SrcVT.getScalarType(),
+ NumOps * SrcVT.getVectorNumElements());
+ return DAG.getNode(Op0.getOpcode(), DL, VT,
+ ConcatSubOperand(SrcVT, Ops, 0),
+ ConcatSubOperand(SrcVT, Ops, 1));
+ }
+ break;
case X86ISD::HADD:
case X86ISD::HSUB:
case X86ISD::FHADD:
diff --git a/llvm/test/CodeGen/X86/widen_fadd.ll b/llvm/test/CodeGen/X86/widen_fadd.ll
index 68f2ed4368044..e910c245ff2f9 100644
--- a/llvm/test/CodeGen/X86/widen_fadd.ll
+++ b/llvm/test/CodeGen/X86/widen_fadd.ll
@@ -72,15 +72,15 @@ define void @widen_fadd_v2f32_v8f32(ptr %a0, ptr %b0, ptr %c0) {
; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX1OR2-NEXT: vaddps %xmm4, %xmm0, %xmm0
-; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX1OR2-NEXT: vaddps %xmm4, %xmm1, %xmm1
-; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX1OR2-NEXT: vaddps %xmm4, %xmm2, %xmm2
-; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX1OR2-NEXT: vaddps %xmm4, %xmm3, %xmm3
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm5 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm6 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm7 = mem[0],zero
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm7, %ymm5, %ymm5
; AVX1OR2-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX1OR2-NEXT: vaddps %ymm5, %ymm1, %ymm1
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm6, %ymm4, %ymm3
; AVX1OR2-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1OR2-NEXT: vaddps %ymm3, %ymm0, %ymm0
; AVX1OR2-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; AVX1OR2-NEXT: vmovups %ymm0, (%rdx)
; AVX1OR2-NEXT: vzeroupper
@@ -93,15 +93,15 @@ define void @widen_fadd_v2f32_v8f32(ptr %a0, ptr %b0, ptr %c0) {
; AVX512F-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
; AVX512F-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
; AVX512F-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX512F-NEXT: vaddps %xmm4, %xmm0, %xmm0
-; AVX512F-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX512F-NEXT: vaddps %xmm4, %xmm1, %xmm1
-; AVX512F-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX512F-NEXT: vaddps %xmm4, %xmm2, %xmm2
-; AVX512F-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX512F-NEXT: vaddps %xmm4, %xmm3, %xmm3
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm5 = mem[0],zero
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm6 = mem[0],zero
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm7 = mem[0],zero
+; AVX512F-NEXT: vinsertf128 $1, %xmm7, %ymm5, %ymm5
; AVX512F-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX512F-NEXT: vaddps %ymm5, %ymm1, %ymm1
+; AVX512F-NEXT: vinsertf128 $1, %xmm6, %ymm4, %ymm3
; AVX512F-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX512F-NEXT: vaddps %ymm3, %ymm0, %ymm0
; AVX512F-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; AVX512F-NEXT: vmovups %ymm0, (%rdx)
; AVX512F-NEXT: vzeroupper
@@ -114,17 +114,17 @@ define void @widen_fadd_v2f32_v8f32(ptr %a0, ptr %b0, ptr %c0) {
; AVX512VL-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
; AVX512VL-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
; AVX512VL-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX512VL-NEXT: vaddps %xmm4, %xmm0, %xmm0
-; AVX512VL-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX512VL-NEXT: vaddps %xmm4, %xmm1, %xmm1
-; AVX512VL-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX512VL-NEXT: vaddps %xmm4, %xmm2, %xmm2
-; AVX512VL-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX512VL-NEXT: vaddps %xmm4, %xmm3, %xmm3
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm5 = mem[0],zero
+; AVX512VL-NEXT: vaddps %xmm5, %xmm1, %xmm1
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm5 = mem[0],zero
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm6 = mem[0],zero
+; AVX512VL-NEXT: vaddps %xmm6, %xmm3, %xmm3
; AVX512VL-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
; AVX512VL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX512VL-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
+; AVX512VL-NEXT: vinsertf128 $1, %xmm5, %ymm4, %ymm3
; AVX512VL-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX512VL-NEXT: vaddps %ymm3, %ymm0, %ymm0
; AVX512VL-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; AVX512VL-NEXT: vmovups %ymm0, (%rdx)
; AVX512VL-NEXT: vzeroupper
@@ -201,31 +201,31 @@ define void @widen_fadd_v2f32_v16f32(ptr %a0, ptr %b0, ptr %c0) {
; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX1OR2-NEXT: vaddps %xmm4, %xmm0, %xmm0
-; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX1OR2-NEXT: vaddps %xmm4, %xmm1, %xmm1
-; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX1OR2-NEXT: vaddps %xmm4, %xmm2, %xmm2
-; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX1OR2-NEXT: vaddps %xmm4, %xmm3, %xmm3
-; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm5 = mem[0],zero
-; AVX1OR2-NEXT: vaddps %xmm5, %xmm4, %xmm4
; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm5 = mem[0],zero
; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm6 = mem[0],zero
-; AVX1OR2-NEXT: vaddps %xmm6, %xmm5, %xmm5
-; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm6 = mem[0],zero
-; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm7 = mem[0],zero
-; AVX1OR2-NEXT: vaddps %xmm7, %xmm6, %xmm6
; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm7 = mem[0],zero
; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm8 = mem[0],zero
-; AVX1OR2-NEXT: vaddps %xmm7, %xmm8, %xmm7
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm9 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm10 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm11 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm12 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm13 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm14 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm15 = mem[0],zero
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm7, %ymm5, %ymm5
; AVX1OR2-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX1OR2-NEXT: vaddps %ymm5, %ymm1, %ymm1
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm6, %ymm4, %ymm3
; AVX1OR2-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1OR2-NEXT: vaddps %ymm3, %ymm0, %ymm0
; AVX1OR2-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; AVX1OR2-NEXT: vmovups %ymm0, (%rdx)
-; AVX1OR2-NEXT: vinsertf128 $1, %xmm7, %ymm5, %ymm0
-; AVX1OR2-NEXT: vinsertf128 $1, %xmm6, %ymm4, %ymm1
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm15, %ymm11, %ymm0
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm14, %ymm10, %ymm1
+; AVX1OR2-NEXT: vaddps %ymm0, %ymm1, %ymm0
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm13, %ymm9, %ymm1
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm12, %ymm8, %ymm2
+; AVX1OR2-NEXT: vaddps %ymm1, %ymm2, %ymm1
; AVX1OR2-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX1OR2-NEXT: vmovups %ymm0, 32(%rdx)
; AVX1OR2-NEXT: vzeroupper
diff --git a/llvm/test/CodeGen/X86/widen_fdiv.ll b/llvm/test/CodeGen/X86/widen_fdiv.ll
index f2ffa4bde22cf..b2007c19dc0f4 100644
--- a/llvm/test/CodeGen/X86/widen_fdiv.ll
+++ b/llvm/test/CodeGen/X86/widen_fdiv.ll
@@ -72,15 +72,15 @@ define void @widen_fdiv_v2f32_v8f32(ptr %a0, ptr %b0, ptr %c0) {
; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX1OR2-NEXT: vdivps %xmm4, %xmm0, %xmm0
-; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX1OR2-NEXT: vdivps %xmm4, %xmm1, %xmm1
-; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX1OR2-NEXT: vdivps %xmm4, %xmm2, %xmm2
-; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX1OR2-NEXT: vdivps %xmm4, %xmm3, %xmm3
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm5 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm6 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm7 = mem[0],zero
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm7, %ymm5, %ymm5
; AVX1OR2-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX1OR2-NEXT: vdivps %ymm5, %ymm1, %ymm1
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm6, %ymm4, %ymm3
; AVX1OR2-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1OR2-NEXT: vdivps %ymm3, %ymm0, %ymm0
; AVX1OR2-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; AVX1OR2-NEXT: vmovups %ymm0, (%rdx)
; AVX1OR2-NEXT: vzeroupper
@@ -93,15 +93,15 @@ define void @widen_fdiv_v2f32_v8f32(ptr %a0, ptr %b0, ptr %c0) {
; AVX512F-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
; AVX512F-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
; AVX512F-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX512F-NEXT: vdivps %xmm4, %xmm0, %xmm0
-; AVX512F-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX512F-NEXT: vdivps %xmm4, %xmm1, %xmm1
-; AVX512F-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX512F-NEXT: vdivps %xmm4, %xmm2, %xmm2
-; AVX512F-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX512F-NEXT: vdivps %xmm4, %xmm3, %xmm3
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm5 = mem[0],zero
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm6 = mem[0],zero
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm7 = mem[0],zero
+; AVX512F-NEXT: vinsertf128 $1, %xmm7, %ymm5, %ymm5
; AVX512F-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX512F-NEXT: vdivps %ymm5, %ymm1, %ymm1
+; AVX512F-NEXT: vinsertf128 $1, %xmm6, %ymm4, %ymm3
; AVX512F-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX512F-NEXT: vdivps %ymm3, %ymm0, %ymm0
; AVX512F-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; AVX512F-NEXT: vmovups %ymm0, (%rdx)
; AVX512F-NEXT: vzeroupper
@@ -114,17 +114,17 @@ define void @widen_fdiv_v2f32_v8f32(ptr %a0, ptr %b0, ptr %c0) {
; AVX512VL-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
; AVX512VL-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
; AVX512VL-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX512VL-NEXT: vdivps %xmm4, %xmm0, %xmm0
-; AVX512VL-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX512VL-NEXT: vdivps %xmm4, %xmm1, %xmm1
-; AVX512VL-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX512VL-NEXT: vdivps %xmm4, %xmm2, %xmm2
-; AVX512VL-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX512VL-NEXT: vdivps %xmm4, %xmm3, %xmm3
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm5 = mem[0],zero
+; AVX512VL-NEXT: vdivps %xmm5, %xmm1, %xmm1
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm5 = mem[0],zero
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm6 = mem[0],zero
+; AVX512VL-NEXT: vdivps %xmm6, %xmm3, %xmm3
; AVX512VL-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
; AVX512VL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX512VL-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
+; AVX512VL-NEXT: vinsertf128 $1, %xmm5, %ymm4, %ymm3
; AVX512VL-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX512VL-NEXT: vdivps %ymm3, %ymm0, %ymm0
; AVX512VL-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; AVX512VL-NEXT: vmovups %ymm0, (%rdx)
; AVX512VL-NEXT: vzeroupper
@@ -201,31 +201,31 @@ define void @widen_fdiv_v2f32_v16f32(ptr %a0, ptr %b0, ptr %c0) {
; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX1OR2-NEXT: vdivps %xmm4, %xmm0, %xmm0
-; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX1OR2-NEXT: vdivps %xmm4, %xmm1, %xmm1
-; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX1OR2-NEXT: vdivps %xmm4, %xmm2, %xmm2
-; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX1OR2-NEXT: vdivps %xmm4, %xmm3, %xmm3
-; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm5 = mem[0],zero
-; AVX1OR2-NEXT: vdivps %xmm5, %xmm4, %xmm4
; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm5 = mem[0],zero
; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm6 = mem[0],zero
-; AVX1OR2-NEXT: vdivps %xmm6, %xmm5, %xmm5
-; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm6 = mem[0],zero
-; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm7 = mem[0],zero
-; AVX1OR2-NEXT: vdivps %xmm7, %xmm6, %xmm6
; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm7 = mem[0],zero
; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm8 = mem[0],zero
-; AVX1OR2-NEXT: vdivps %xmm8, %xmm7, %xmm7
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm9 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm10 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm11 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm12 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm13 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm14 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm15 = mem[0],zero
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm7, %ymm5, %ymm5
; AVX1OR2-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX1OR2-NEXT: vdivps %ymm5, %ymm1, %ymm1
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm6, %ymm4, %ymm3
; AVX1OR2-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1OR2-NEXT: vdivps %ymm3, %ymm0, %ymm0
; AVX1OR2-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; AVX1OR2-NEXT: vmovups %ymm0, (%rdx)
-; AVX1OR2-NEXT: vinsertf128 $1, %xmm7, %ymm5, %ymm0
-; AVX1OR2-NEXT: vinsertf128 $1, %xmm6, %ymm4, %ymm1
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm15, %ymm11, %ymm0
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm14, %ymm10, %ymm1
+; AVX1OR2-NEXT: vdivps %ymm0, %ymm1, %ymm0
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm13, %ymm9, %ymm1
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm12, %ymm8, %ymm2
+; AVX1OR2-NEXT: vdivps %ymm1, %ymm2, %ymm1
; AVX1OR2-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX1OR2-NEXT: vmovups %ymm0, 32(%rdx)
; AVX1OR2-NEXT: vzeroupper
diff --git a/llvm/test/CodeGen/X86/widen_fmul.ll b/llvm/test/CodeGen/X86/widen_fmul.ll
index ac208da9ee11a..e5bb954a01eef 100644
--- a/llvm/test/CodeGen/X86/widen_fmul.ll
+++ b/llvm/test/CodeGen/X86/widen_fmul.ll
@@ -72,15 +72,15 @@ define void @widen_fmul_v2f32_v8f32(ptr %a0, ptr %b0, ptr %c0) {
; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX1OR2-NEXT: vmulps %xmm4, %xmm0, %xmm0
-; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX1OR2-NEXT: vmulps %xmm4, %xmm1, %xmm1
-; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX1OR2-NEXT: vmulps %xmm4, %xmm2, %xmm2
-; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX1OR2-NEXT: vmulps %xmm4, %xmm3, %xmm3
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm5 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm6 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm7 = mem[0],zero
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm7, %ymm5, %ymm5
; AVX1OR2-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX1OR2-NEXT: vmulps %ymm5, %ymm1, %ymm1
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm6, %ymm4, %ymm3
; AVX1OR2-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1OR2-NEXT: vmulps %ymm3, %ymm0, %ymm0
; AVX1OR2-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; AVX1OR2-NEXT: vmovups %ymm0, (%rdx)
; AVX1OR2-NEXT: vzeroupper
@@ -93,15 +93,15 @@ define void @widen_fmul_v2f32_v8f32(ptr %a0, ptr %b0, ptr %c0) {
; AVX512F-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
; AVX512F-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
; AVX512F-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX512F-NEXT: vmulps %xmm4, %xmm0, %xmm0
-; AVX512F-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX512F-NEXT: vmulps %xmm4, %xmm1, %xmm1
-; AVX512F-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX512F-NEXT: vmulps %xmm4, %xmm2, %xmm2
-; AVX512F-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX512F-NEXT: vmulps %xmm4, %xmm3, %xmm3
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm5 = mem[0],zero
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm6 = mem[0],zero
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm7 = mem[0],zero
+; AVX512F-NEXT: vinsertf128 $1, %xmm7, %ymm5, %ymm5
; AVX512F-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX512F-NEXT: vmulps %ymm5, %ymm1, %ymm1
+; AVX512F-NEXT: vinsertf128 $1, %xmm6, %ymm4, %ymm3
; AVX512F-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX512F-NEXT: vmulps %ymm3, %ymm0, %ymm0
; AVX512F-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; AVX512F-NEXT: vmovups %ymm0, (%rdx)
; AVX512F-NEXT: vzeroupper
@@ -114,17 +114,17 @@ define void @widen_fmul_v2f32_v8f32(ptr %a0, ptr %b0, ptr %c0) {
; AVX512VL-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
; AVX512VL-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
; AVX512VL-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX512VL-NEXT: vmulps %xmm4, %xmm0, %xmm0
-; AVX512VL-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX512VL-NEXT: vmulps %xmm4, %xmm1, %xmm1
-; AVX512VL-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX512VL-NEXT: vmulps %xmm4, %xmm2, %xmm2
-; AVX512VL-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX512VL-NEXT: vmulps %xmm4, %xmm3, %xmm3
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm5 = mem[0],zero
+; AVX512VL-NEXT: vmulps %xmm5, %xmm1, %xmm1
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm5 = mem[0],zero
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm6 = mem[0],zero
+; AVX512VL-NEXT: vmulps %xmm6, %xmm3, %xmm3
; AVX512VL-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
; AVX512VL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX512VL-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
+; AVX512VL-NEXT: vinsertf128 $1, %xmm5, %ymm4, %ymm3
; AVX512VL-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX512VL-NEXT: vmulps %ymm3, %ymm0, %ymm0
; AVX512VL-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; AVX512VL-NEXT: vmovups %ymm0, (%rdx)
; AVX512VL-NEXT: vzeroupper
@@ -201,31 +201,31 @@ define void @widen_fmul_v2f32_v16f32(ptr %a0, ptr %b0, ptr %c0) {
; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX1OR2-NEXT: vmulps %xmm4, %xmm0, %xmm0
-; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX1OR2-NEXT: vmulps %xmm4, %xmm1, %xmm1
-; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX1OR2-NEXT: vmulps %xmm4, %xmm2, %xmm2
-; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX1OR2-NEXT: vmulps %xmm4, %xmm3, %xmm3
-; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm5 = mem[0],zero
-; AVX1OR2-NEXT: vmulps %xmm5, %xmm4, %xmm4
; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm5 = mem[0],zero
; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm6 = mem[0],zero
-; AVX1OR2-NEXT: vmulps %xmm6, %xmm5, %xmm5
-; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm6 = mem[0],zero
-; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm7 = mem[0],zero
-; AVX1OR2-NEXT: vmulps %xmm7, %xmm6, %xmm6
; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm7 = mem[0],zero
; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm8 = mem[0],zero
-; AVX1OR2-NEXT: vmulps %xmm7, %xmm8, %xmm7
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm9 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm10 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm11 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm12 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm13 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm14 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm15 = mem[0],zero
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm7, %ymm5, %ymm5
; AVX1OR2-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX1OR2-NEXT: vmulps %ymm5, %ymm1, %ymm1
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm6, %ymm4, %ymm3
; AVX1OR2-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1OR2-NEXT: vmulps %ymm3, %ymm0, %ymm0
; AVX1OR2-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; AVX1OR2-NEXT: vmovups %ymm0, (%rdx)
-; AVX1OR2-NEXT: vinsertf128 $1, %xmm7, %ymm5, %ymm0
-; AVX1OR2-NEXT: vinsertf128 $1, %xmm6, %ymm4, %ymm1
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm15, %ymm11, %ymm0
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm14, %ymm10, %ymm1
+; AVX1OR2-NEXT: vmulps %ymm0, %ymm1, %ymm0
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm13, %ymm9, %ymm1
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm12, %ymm8, %ymm2
+; AVX1OR2-NEXT: vmulps %ymm1, %ymm2, %ymm1
; AVX1OR2-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX1OR2-NEXT: vmovups %ymm0, 32(%rdx)
; AVX1OR2-NEXT: vzeroupper
diff --git a/llvm/test/CodeGen/X86/widen_fsub.ll b/llvm/test/CodeGen/X86/widen_fsub.ll
index 90cf455ba61fc..38ee83a545b7a 100644
--- a/llvm/test/CodeGen/X86/widen_fsub.ll
+++ b/llvm/test/CodeGen/X86/widen_fsub.ll
@@ -72,15 +72,15 @@ define void @widen_fsub_v2f32_v8f32(ptr %a0, ptr %b0, ptr %c0) {
; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX1OR2-NEXT: vsubps %xmm4, %xmm0, %xmm0
-; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX1OR2-NEXT: vsubps %xmm4, %xmm1, %xmm1
-; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX1OR2-NEXT: vsubps %xmm4, %xmm2, %xmm2
-; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX1OR2-NEXT: vsubps %xmm4, %xmm3, %xmm3
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm5 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm6 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm7 = mem[0],zero
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm7, %ymm5, %ymm5
; AVX1OR2-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX1OR2-NEXT: vsubps %ymm5, %ymm1, %ymm1
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm6, %ymm4, %ymm3
; AVX1OR2-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1OR2-NEXT: vsubps %ymm3, %ymm0, %ymm0
; AVX1OR2-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; AVX1OR2-NEXT: vmovups %ymm0, (%rdx)
; AVX1OR2-NEXT: vzeroupper
@@ -93,15 +93,15 @@ define void @widen_fsub_v2f32_v8f32(ptr %a0, ptr %b0, ptr %c0) {
; AVX512F-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
; AVX512F-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
; AVX512F-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX512F-NEXT: vsubps %xmm4, %xmm0, %xmm0
-; AVX512F-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX512F-NEXT: vsubps %xmm4, %xmm1, %xmm1
-; AVX512F-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX512F-NEXT: vsubps %xmm4, %xmm2, %xmm2
-; AVX512F-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX512F-NEXT: vsubps %xmm4, %xmm3, %xmm3
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm5 = mem[0],zero
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm6 = mem[0],zero
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm7 = mem[0],zero
+; AVX512F-NEXT: vinsertf128 $1, %xmm7, %ymm5, %ymm5
; AVX512F-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX512F-NEXT: vsubps %ymm5, %ymm1, %ymm1
+; AVX512F-NEXT: vinsertf128 $1, %xmm6, %ymm4, %ymm3
; AVX512F-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX512F-NEXT: vsubps %ymm3, %ymm0, %ymm0
; AVX512F-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; AVX512F-NEXT: vmovups %ymm0, (%rdx)
; AVX512F-NEXT: vzeroupper
@@ -114,17 +114,17 @@ define void @widen_fsub_v2f32_v8f32(ptr %a0, ptr %b0, ptr %c0) {
; AVX512VL-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
; AVX512VL-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
; AVX512VL-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX512VL-NEXT: vsubps %xmm4, %xmm0, %xmm0
-; AVX512VL-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX512VL-NEXT: vsubps %xmm4, %xmm1, %xmm1
-; AVX512VL-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX512VL-NEXT: vsubps %xmm4, %xmm2, %xmm2
-; AVX512VL-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX512VL-NEXT: vsubps %xmm4, %xmm3, %xmm3
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm5 = mem[0],zero
+; AVX512VL-NEXT: vsubps %xmm5, %xmm1, %xmm1
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm5 = mem[0],zero
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm6 = mem[0],zero
+; AVX512VL-NEXT: vsubps %xmm6, %xmm3, %xmm3
; AVX512VL-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
; AVX512VL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX512VL-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
+; AVX512VL-NEXT: vinsertf128 $1, %xmm5, %ymm4, %ymm3
; AVX512VL-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX512VL-NEXT: vsubps %ymm3, %ymm0, %ymm0
; AVX512VL-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; AVX512VL-NEXT: vmovups %ymm0, (%rdx)
; AVX512VL-NEXT: vzeroupper
@@ -201,31 +201,31 @@ define void @widen_fsub_v2f32_v16f32(ptr %a0, ptr %b0, ptr %c0) {
; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX1OR2-NEXT: vsubps %xmm4, %xmm0, %xmm0
-; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX1OR2-NEXT: vsubps %xmm4, %xmm1, %xmm1
-; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX1OR2-NEXT: vsubps %xmm4, %xmm2, %xmm2
-; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX1OR2-NEXT: vsubps %xmm4, %xmm3, %xmm3
-; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm5 = mem[0],zero
-; AVX1OR2-NEXT: vsubps %xmm5, %xmm4, %xmm4
; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm5 = mem[0],zero
; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm6 = mem[0],zero
-; AVX1OR2-NEXT: vsubps %xmm6, %xmm5, %xmm5
-; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm6 = mem[0],zero
-; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm7 = mem[0],zero
-; AVX1OR2-NEXT: vsubps %xmm7, %xmm6, %xmm6
; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm7 = mem[0],zero
; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm8 = mem[0],zero
-; AVX1OR2-NEXT: vsubps %xmm8, %xmm7, %xmm7
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm9 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm10 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm11 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm12 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm13 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm14 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm15 = mem[0],zero
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm7, %ymm5, %ymm5
; AVX1OR2-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX1OR2-NEXT: vsubps %ymm5, %ymm1, %ymm1
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm6, %ymm4, %ymm3
; AVX1OR2-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1OR2-NEXT: vsubps %ymm3, %ymm0, %ymm0
; AVX1OR2-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; AVX1OR2-NEXT: vmovups %ymm0, (%rdx)
-; AVX1OR2-NEXT: vinsertf128 $1, %xmm7, %ymm5, %ymm0
-; AVX1OR2-NEXT: vinsertf128 $1, %xmm6, %ymm4, %ymm1
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm15, %ymm11, %ymm0
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm14, %ymm10, %ymm1
+; AVX1OR2-NEXT: vsubps %ymm0, %ymm1, %ymm0
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm13, %ymm9, %ymm1
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm12, %ymm8, %ymm2
+; AVX1OR2-NEXT: vsubps %ymm1, %ymm2, %ymm1
; AVX1OR2-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX1OR2-NEXT: vmovups %ymm0, 32(%rdx)
; AVX1OR2-NEXT: vzeroupper