[llvm] [DAG] Use known-bits when creating umulh/smulh. (PR #160916)

David Green via llvm-commits llvm-commits at lists.llvm.org
Thu Apr 2 06:18:44 PDT 2026


https://github.com/davemgreen updated https://github.com/llvm/llvm-project/pull/160916

>From 7ca16677c6bf5857d720f98b59bb0b63583539fa Mon Sep 17 00:00:00 2001
From: David Green <david.green at arm.com>
Date: Thu, 2 Apr 2026 14:14:16 +0100
Subject: [PATCH] [DAG] Use known-bits when creating umulh/smulh.

This extends the creation of umulh/smulh instructions to handle cases where one
operand is a zext/sext and the other operand has enough known-zero bits (for
umulh) or sign bits (for smulh) to allow a mulh to be formed. This can be
useful when one of the operands is hoisted out of a loop.
---
 llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp |  23 ++-
 llvm/test/CodeGen/AMDGPU/sdiv64.ll            |   4 +-
 llvm/test/CodeGen/AMDGPU/udiv64.ll            |   8 +-
 llvm/test/CodeGen/AMDGPU/urem64.ll            |  16 +-
 llvm/test/CodeGen/Thumb2/mve-vmulh.ll         | 144 +++----------
 llvm/test/CodeGen/X86/combine-pmuldq.ll       | 191 +++++++++++-------
 6 files changed, 174 insertions(+), 212 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index d26300f0b4c24..7aaef49734b6e 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -11030,6 +11030,10 @@ static SDValue combineShiftToMULH(SDNode *N, const SDLoc &DL, SelectionDAG &DAG,
   SDValue LeftOp = ShiftOperand.getOperand(0);
   SDValue RightOp = ShiftOperand.getOperand(1);
 
+  if (LeftOp.getOpcode() != ISD::SIGN_EXTEND &&
+      LeftOp.getOpcode() != ISD::ZERO_EXTEND)
+    std::swap(LeftOp, RightOp);
+
   bool IsSignExt = LeftOp.getOpcode() == ISD::SIGN_EXTEND;
   bool IsZeroExt = LeftOp.getOpcode() == ISD::ZERO_EXTEND;
 
@@ -11062,18 +11066,17 @@ static SDValue combineShiftToMULH(SDNode *N, const SDLoc &DL, SelectionDAG &DAG,
   }
 
   SDValue MulhRightOp;
-  if (ConstantSDNode *Constant = isConstOrConstSplat(RightOp)) {
-    unsigned ActiveBits = IsSignExt
-                              ? Constant->getAPIntValue().getSignificantBits()
-                              : Constant->getAPIntValue().getActiveBits();
-    if (ActiveBits > NarrowVTSize)
+  if (LeftOp.getOpcode() != RightOp.getOpcode()) {
+    if (IsZeroExt && ShiftOperand.hasOneUse() &&
+        DAG.computeKnownBits(RightOp).countMaxActiveBits() <= NarrowVTSize) {
+      MulhRightOp = DAG.getNode(ISD::TRUNCATE, DL, NarrowVT, RightOp);
+    } else if (IsSignExt && ShiftOperand.hasOneUse() &&
+               DAG.ComputeMaxSignificantBits(RightOp) <= NarrowVTSize) {
+      MulhRightOp = DAG.getNode(ISD::TRUNCATE, DL, NarrowVT, RightOp);
+    } else {
       return SDValue();
-    MulhRightOp = DAG.getConstant(
-        Constant->getAPIntValue().trunc(NarrowVT.getScalarSizeInBits()), DL,
-        NarrowVT);
+    }
   } else {
-    if (LeftOp.getOpcode() != RightOp.getOpcode())
-      return SDValue();
     // Check that the two extend nodes are the same type.
     if (NarrowVT != RightOp.getOperand(0).getValueType())
       return SDValue();
diff --git a/llvm/test/CodeGen/AMDGPU/sdiv64.ll b/llvm/test/CodeGen/AMDGPU/sdiv64.ll
index fdb20f372ab8d..c9c9cc06bb00b 100644
--- a/llvm/test/CodeGen/AMDGPU/sdiv64.ll
+++ b/llvm/test/CodeGen/AMDGPU/sdiv64.ll
@@ -560,7 +560,7 @@ define i64 @v_test_sdiv24_64(i64 %x, i64 %y) {
 ; GCN-NEXT:    v_mul_lo_u32 v3, v3, v2
 ; GCN-NEXT:    v_mul_hi_u32 v3, v2, v3
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
-; GCN-NEXT:    v_mul_hi_u32 v2, v1, v2
+; GCN-NEXT:    v_mul_hi_u32 v2, v2, v1
 ; GCN-NEXT:    v_mul_u32_u24_e32 v3, v2, v0
 ; GCN-NEXT:    v_add_i32_e32 v4, vcc, 1, v2
 ; GCN-NEXT:    v_sub_i32_e32 v1, vcc, v1, v3
@@ -587,7 +587,7 @@ define i64 @v_test_sdiv24_64(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    v_mul_lo_u32 v3, v3, v2
 ; GCN-IR-NEXT:    v_mul_hi_u32 v3, v2, v3
 ; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
-; GCN-IR-NEXT:    v_mul_hi_u32 v2, v1, v2
+; GCN-IR-NEXT:    v_mul_hi_u32 v2, v2, v1
 ; GCN-IR-NEXT:    v_mul_u32_u24_e32 v3, v2, v0
 ; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, 1, v2
 ; GCN-IR-NEXT:    v_sub_i32_e32 v1, vcc, v1, v3
diff --git a/llvm/test/CodeGen/AMDGPU/udiv64.ll b/llvm/test/CodeGen/AMDGPU/udiv64.ll
index 1c50f930facba..90f0cae851151 100644
--- a/llvm/test/CodeGen/AMDGPU/udiv64.ll
+++ b/llvm/test/CodeGen/AMDGPU/udiv64.ll
@@ -511,7 +511,7 @@ define amdgpu_kernel void @s_test_udiv32_i64(ptr addrspace(1) %out, i64 %x, i64
 ; GCN-NEXT:    s_mov_b32 s4, s0
 ; GCN-NEXT:    s_mov_b32 s5, s1
 ; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
-; GCN-NEXT:    v_mul_hi_u32 v0, s3, v0
+; GCN-NEXT:    v_mul_hi_u32 v0, v0, s3
 ; GCN-NEXT:    v_readfirstlane_b32 s0, v0
 ; GCN-NEXT:    s_mul_i32 s0, s0, s8
 ; GCN-NEXT:    s_sub_i32 s0, s3, s0
@@ -547,7 +547,7 @@ define amdgpu_kernel void @s_test_udiv32_i64(ptr addrspace(1) %out, i64 %x, i64
 ; GCN-IR-NEXT:    s_mov_b32 s4, s0
 ; GCN-IR-NEXT:    s_mov_b32 s5, s1
 ; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
-; GCN-IR-NEXT:    v_mul_hi_u32 v0, s3, v0
+; GCN-IR-NEXT:    v_mul_hi_u32 v0, v0, s3
 ; GCN-IR-NEXT:    v_readfirstlane_b32 s0, v0
 ; GCN-IR-NEXT:    s_mul_i32 s0, s0, s8
 ; GCN-IR-NEXT:    s_sub_i32 s0, s3, s0
@@ -591,7 +591,7 @@ define amdgpu_kernel void @s_test_udiv31_i64(ptr addrspace(1) %out, i64 %x, i64
 ; GCN-NEXT:    s_lshr_b32 s2, s3, 1
 ; GCN-NEXT:    s_mov_b32 s4, s0
 ; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
-; GCN-NEXT:    v_mul_hi_u32 v0, s2, v0
+; GCN-NEXT:    v_mul_hi_u32 v0, v0, s2
 ; GCN-NEXT:    s_mov_b32 s5, s1
 ; GCN-NEXT:    v_readfirstlane_b32 s0, v0
 ; GCN-NEXT:    s_mul_i32 s0, s0, s8
@@ -629,7 +629,7 @@ define amdgpu_kernel void @s_test_udiv31_i64(ptr addrspace(1) %out, i64 %x, i64
 ; GCN-IR-NEXT:    s_lshr_b32 s2, s3, 1
 ; GCN-IR-NEXT:    s_mov_b32 s4, s0
 ; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
-; GCN-IR-NEXT:    v_mul_hi_u32 v0, s2, v0
+; GCN-IR-NEXT:    v_mul_hi_u32 v0, v0, s2
 ; GCN-IR-NEXT:    s_mov_b32 s5, s1
 ; GCN-IR-NEXT:    v_readfirstlane_b32 s0, v0
 ; GCN-IR-NEXT:    s_mul_i32 s0, s0, s8
diff --git a/llvm/test/CodeGen/AMDGPU/urem64.ll b/llvm/test/CodeGen/AMDGPU/urem64.ll
index 28e6627b87413..4b79166c473eb 100644
--- a/llvm/test/CodeGen/AMDGPU/urem64.ll
+++ b/llvm/test/CodeGen/AMDGPU/urem64.ll
@@ -455,7 +455,7 @@ define amdgpu_kernel void @s_test_urem31_i64(ptr addrspace(1) %out, i64 %x, i64
 ; GCN-NEXT:    s_lshr_b32 s2, s3, 1
 ; GCN-NEXT:    s_mov_b32 s4, s0
 ; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
-; GCN-NEXT:    v_mul_hi_u32 v0, s2, v0
+; GCN-NEXT:    v_mul_hi_u32 v0, v0, s2
 ; GCN-NEXT:    s_mov_b32 s5, s1
 ; GCN-NEXT:    v_mov_b32_e32 v1, 0
 ; GCN-NEXT:    v_readfirstlane_b32 s0, v0
@@ -490,7 +490,7 @@ define amdgpu_kernel void @s_test_urem31_i64(ptr addrspace(1) %out, i64 %x, i64
 ; GCN-IR-NEXT:    s_lshr_b32 s2, s3, 1
 ; GCN-IR-NEXT:    s_mov_b32 s4, s0
 ; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
-; GCN-IR-NEXT:    v_mul_hi_u32 v0, s2, v0
+; GCN-IR-NEXT:    v_mul_hi_u32 v0, v0, s2
 ; GCN-IR-NEXT:    s_mov_b32 s5, s1
 ; GCN-IR-NEXT:    v_mov_b32_e32 v1, 0
 ; GCN-IR-NEXT:    v_readfirstlane_b32 s0, v0
@@ -532,7 +532,7 @@ define amdgpu_kernel void @s_test_urem31_v2i64(ptr addrspace(1) %out, <2 x i64>
 ; GCN-NEXT:    s_lshr_b32 s1, s9, 1
 ; GCN-NEXT:    v_mul_hi_u32 v1, v0, v1
 ; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
-; GCN-NEXT:    v_mul_hi_u32 v0, s1, v0
+; GCN-NEXT:    v_mul_hi_u32 v0, v0, s1
 ; GCN-NEXT:    v_mul_f32_e32 v1, 0x4f7ffffe, v2
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
 ; GCN-NEXT:    v_readfirstlane_b32 s2, v0
@@ -550,7 +550,7 @@ define amdgpu_kernel void @s_test_urem31_v2i64(ptr addrspace(1) %out, <2 x i64>
 ; GCN-NEXT:    s_mov_b32 s2, -1
 ; GCN-NEXT:    v_mul_hi_u32 v0, v1, v0
 ; GCN-NEXT:    v_add_i32_e32 v0, vcc, v1, v0
-; GCN-NEXT:    v_mul_hi_u32 v2, s7, v0
+; GCN-NEXT:    v_mul_hi_u32 v2, v0, s7
 ; GCN-NEXT:    v_mov_b32_e32 v1, 0
 ; GCN-NEXT:    v_mov_b32_e32 v0, s8
 ; GCN-NEXT:    v_mov_b32_e32 v3, v1
@@ -587,7 +587,7 @@ define amdgpu_kernel void @s_test_urem31_v2i64(ptr addrspace(1) %out, <2 x i64>
 ; GCN-IR-NEXT:    s_lshr_b32 s1, s9, 1
 ; GCN-IR-NEXT:    v_mul_hi_u32 v1, v0, v1
 ; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
-; GCN-IR-NEXT:    v_mul_hi_u32 v0, s1, v0
+; GCN-IR-NEXT:    v_mul_hi_u32 v0, v0, s1
 ; GCN-IR-NEXT:    v_mul_f32_e32 v1, 0x4f7ffffe, v2
 ; GCN-IR-NEXT:    v_cvt_u32_f32_e32 v1, v1
 ; GCN-IR-NEXT:    v_readfirstlane_b32 s2, v0
@@ -605,7 +605,7 @@ define amdgpu_kernel void @s_test_urem31_v2i64(ptr addrspace(1) %out, <2 x i64>
 ; GCN-IR-NEXT:    s_mov_b32 s2, -1
 ; GCN-IR-NEXT:    v_mul_hi_u32 v0, v1, v0
 ; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v1, v0
-; GCN-IR-NEXT:    v_mul_hi_u32 v2, s7, v0
+; GCN-IR-NEXT:    v_mul_hi_u32 v2, v0, s7
 ; GCN-IR-NEXT:    v_mov_b32_e32 v1, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v0, s8
 ; GCN-IR-NEXT:    v_mov_b32_e32 v3, v1
@@ -716,7 +716,7 @@ define amdgpu_kernel void @s_test_urem23_64_v2i64(ptr addrspace(1) %out, <2 x i6
 ; GCN-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
 ; GCN-NEXT:    v_mul_lo_u32 v1, v1, s4
 ; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v4
-; GCN-NEXT:    v_mul_hi_u32 v0, s7, v0
+; GCN-NEXT:    v_mul_hi_u32 v0, v0, s7
 ; GCN-NEXT:    v_sub_i32_e32 v1, vcc, s5, v1
 ; GCN-NEXT:    v_and_b32_e32 v2, 0x7fffff, v1
 ; GCN-NEXT:    v_readfirstlane_b32 s4, v0
@@ -763,7 +763,7 @@ define amdgpu_kernel void @s_test_urem23_64_v2i64(ptr addrspace(1) %out, <2 x i6
 ; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
 ; GCN-IR-NEXT:    v_mul_lo_u32 v1, v1, s4
 ; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v0, v4
-; GCN-IR-NEXT:    v_mul_hi_u32 v0, s7, v0
+; GCN-IR-NEXT:    v_mul_hi_u32 v0, v0, s7
 ; GCN-IR-NEXT:    v_sub_i32_e32 v1, vcc, s5, v1
 ; GCN-IR-NEXT:    v_and_b32_e32 v2, 0x7fffff, v1
 ; GCN-IR-NEXT:    v_readfirstlane_b32 s4, v0
diff --git a/llvm/test/CodeGen/Thumb2/mve-vmulh.ll b/llvm/test/CodeGen/Thumb2/mve-vmulh.ll
index bd7401fee7263..204c98e38f868 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vmulh.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vmulh.ll
@@ -1051,23 +1051,11 @@ entry:
 define arm_aapcs_vfpcc <4 x i32> @vmulhs_kb_v4i32(<4 x i32> %s0, <4 x i64> %s1) {
 ; CHECK-LABEL: vmulhs_kb_v4i32:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vmov.f32 s4, s2
-; CHECK-NEXT:    vmov r1, s9
-; CHECK-NEXT:    vmov r2, s5
-; CHECK-NEXT:    vmov.f32 s6, s3
-; CHECK-NEXT:    vmov.f32 s10, s1
-; CHECK-NEXT:    vmov r0, s4
-; CHECK-NEXT:    smmul r0, r0, r1
-; CHECK-NEXT:    vmov r1, s0
-; CHECK-NEXT:    smmul r1, r1, r2
-; CHECK-NEXT:    vmov r2, s7
-; CHECK-NEXT:    vmov q0[2], q0[0], r1, r0
-; CHECK-NEXT:    vmov r0, s6
-; CHECK-NEXT:    vmov r1, s11
-; CHECK-NEXT:    smmul r0, r0, r1
-; CHECK-NEXT:    vmov r1, s10
-; CHECK-NEXT:    smmul r1, r1, r2
-; CHECK-NEXT:    vmov q0[3], q0[1], r1, r0
+; CHECK-NEXT:    vmov.f32 s4, s5
+; CHECK-NEXT:    vmov.f32 s5, s7
+; CHECK-NEXT:    vmov.f32 s6, s9
+; CHECK-NEXT:    vmov.f32 s7, s11
+; CHECK-NEXT:    vmulh.s32 q0, q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
   %s0s = sext <4 x i32> %s0 to <4 x i64>
@@ -1081,23 +1069,11 @@ entry:
 define arm_aapcs_vfpcc <4 x i32> @vmulhu_kb_v4i32(<4 x i32> %s0, <4 x i64> %s1) {
 ; CHECK-LABEL: vmulhu_kb_v4i32:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vmov.f32 s4, s2
-; CHECK-NEXT:    vmov r1, s9
-; CHECK-NEXT:    vmov r2, s5
-; CHECK-NEXT:    vmov.f32 s6, s3
-; CHECK-NEXT:    vmov.f32 s10, s1
-; CHECK-NEXT:    vmov r0, s4
-; CHECK-NEXT:    umull r0, r1, r0, r1
-; CHECK-NEXT:    vmov r0, s0
-; CHECK-NEXT:    umull r0, r2, r0, r2
-; CHECK-NEXT:    vmov r0, s6
-; CHECK-NEXT:    vmov q0[2], q0[0], r2, r1
-; CHECK-NEXT:    vmov r1, s11
-; CHECK-NEXT:    vmov r2, s7
-; CHECK-NEXT:    umull r0, r1, r0, r1
-; CHECK-NEXT:    vmov r0, s10
-; CHECK-NEXT:    umull r0, r2, r0, r2
-; CHECK-NEXT:    vmov q0[3], q0[1], r2, r1
+; CHECK-NEXT:    vmov.f32 s4, s5
+; CHECK-NEXT:    vmov.f32 s5, s7
+; CHECK-NEXT:    vmov.f32 s6, s9
+; CHECK-NEXT:    vmov.f32 s7, s11
+; CHECK-NEXT:    vmulh.u32 q0, q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
   %s0s = zext <4 x i32> %s0 to <4 x i64>
@@ -1111,23 +1087,11 @@ entry:
 define arm_aapcs_vfpcc <4 x i32> @vmulhs_kbc_v4i32(<4 x i32> %s0, <4 x i64> %s1) {
 ; CHECK-LABEL: vmulhs_kbc_v4i32:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vmov.f32 s4, s2
-; CHECK-NEXT:    vmov r1, s9
-; CHECK-NEXT:    vmov r2, s5
-; CHECK-NEXT:    vmov.f32 s6, s3
-; CHECK-NEXT:    vmov.f32 s10, s1
-; CHECK-NEXT:    vmov r0, s4
-; CHECK-NEXT:    smmul r0, r1, r0
-; CHECK-NEXT:    vmov r1, s0
-; CHECK-NEXT:    smmul r1, r2, r1
-; CHECK-NEXT:    vmov r2, s7
-; CHECK-NEXT:    vmov q0[2], q0[0], r1, r0
-; CHECK-NEXT:    vmov r0, s6
-; CHECK-NEXT:    vmov r1, s11
-; CHECK-NEXT:    smmul r0, r1, r0
-; CHECK-NEXT:    vmov r1, s10
-; CHECK-NEXT:    smmul r1, r2, r1
-; CHECK-NEXT:    vmov q0[3], q0[1], r1, r0
+; CHECK-NEXT:    vmov.f32 s4, s5
+; CHECK-NEXT:    vmov.f32 s5, s7
+; CHECK-NEXT:    vmov.f32 s6, s9
+; CHECK-NEXT:    vmov.f32 s7, s11
+; CHECK-NEXT:    vmulh.s32 q0, q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
   %s0s = sext <4 x i32> %s0 to <4 x i64>
@@ -1141,23 +1105,11 @@ entry:
 define arm_aapcs_vfpcc <4 x i32> @vmulhu_kbc_v4i32(<4 x i32> %s0, <4 x i64> %s1) {
 ; CHECK-LABEL: vmulhu_kbc_v4i32:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vmov.f32 s4, s2
-; CHECK-NEXT:    vmov r1, s9
-; CHECK-NEXT:    vmov r2, s5
-; CHECK-NEXT:    vmov.f32 s6, s3
-; CHECK-NEXT:    vmov.f32 s10, s1
-; CHECK-NEXT:    vmov r0, s4
-; CHECK-NEXT:    umull r0, r1, r1, r0
-; CHECK-NEXT:    vmov r0, s0
-; CHECK-NEXT:    umull r0, r2, r2, r0
-; CHECK-NEXT:    vmov r0, s6
-; CHECK-NEXT:    vmov q0[2], q0[0], r2, r1
-; CHECK-NEXT:    vmov r1, s11
-; CHECK-NEXT:    vmov r2, s7
-; CHECK-NEXT:    umull r0, r1, r1, r0
-; CHECK-NEXT:    vmov r0, s10
-; CHECK-NEXT:    umull r0, r2, r2, r0
-; CHECK-NEXT:    vmov q0[3], q0[1], r2, r1
+; CHECK-NEXT:    vmov.f32 s4, s5
+; CHECK-NEXT:    vmov.f32 s5, s7
+; CHECK-NEXT:    vmov.f32 s6, s9
+; CHECK-NEXT:    vmov.f32 s7, s11
+; CHECK-NEXT:    vmulh.u32 q0, q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
   %s0s = zext <4 x i32> %s0 to <4 x i64>
@@ -1171,25 +1123,17 @@ entry:
 define arm_aapcs_vfpcc <8 x i16> @vmulhs_kb_v8i16(<8 x i16> %s0, <8 x i32> %s1) {
 ; CHECK-LABEL: vmulhs_kb_v8i16:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    .vsave {d8, d9}
-; CHECK-NEXT:    vpush {d8, d9}
 ; CHECK-NEXT:    vmov.f32 s12, s5
-; CHECK-NEXT:    vmovlt.s16 q4, q0
 ; CHECK-NEXT:    vmov.f32 s13, s7
-; CHECK-NEXT:    vmovlb.s16 q0, q0
 ; CHECK-NEXT:    vmov.f32 s5, s6
 ; CHECK-NEXT:    vmov.f32 s14, s9
 ; CHECK-NEXT:    vmov.f32 s15, s11
 ; CHECK-NEXT:    vmov.f32 s6, s8
-; CHECK-NEXT:    vshr.s32 q3, q3, #16
-; CHECK-NEXT:    vmov.f32 s7, s10
-; CHECK-NEXT:    vmul.i32 q3, q4, q3
-; CHECK-NEXT:    vshr.s32 q1, q1, #16
 ; CHECK-NEXT:    vshr.u32 q3, q3, #16
-; CHECK-NEXT:    vmul.i32 q0, q0, q1
-; CHECK-NEXT:    vshr.u32 q0, q0, #16
-; CHECK-NEXT:    vmovnt.i32 q0, q3
-; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    vmov.f32 s7, s10
+; CHECK-NEXT:    vshr.u32 q1, q1, #16
+; CHECK-NEXT:    vmovnt.i32 q1, q3
+; CHECK-NEXT:    vmulh.s16 q0, q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
   %s0s = sext <8 x i16> %s0 to <8 x i32>
@@ -1203,25 +1147,17 @@ entry:
 define arm_aapcs_vfpcc <8 x i16> @vmulhu_kb_v8i16(<8 x i16> %s0, <8 x i32> %s1) {
 ; CHECK-LABEL: vmulhu_kb_v8i16:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    .vsave {d8, d9}
-; CHECK-NEXT:    vpush {d8, d9}
 ; CHECK-NEXT:    vmov.f32 s12, s5
-; CHECK-NEXT:    vmovlt.u16 q4, q0
 ; CHECK-NEXT:    vmov.f32 s13, s7
-; CHECK-NEXT:    vmovlb.u16 q0, q0
 ; CHECK-NEXT:    vmov.f32 s5, s6
 ; CHECK-NEXT:    vmov.f32 s14, s9
 ; CHECK-NEXT:    vmov.f32 s15, s11
 ; CHECK-NEXT:    vmov.f32 s6, s8
 ; CHECK-NEXT:    vshr.u32 q3, q3, #16
 ; CHECK-NEXT:    vmov.f32 s7, s10
-; CHECK-NEXT:    vmul.i32 q3, q4, q3
 ; CHECK-NEXT:    vshr.u32 q1, q1, #16
-; CHECK-NEXT:    vshr.u32 q3, q3, #16
-; CHECK-NEXT:    vmul.i32 q0, q0, q1
-; CHECK-NEXT:    vshr.u32 q0, q0, #16
-; CHECK-NEXT:    vmovnt.i32 q0, q3
-; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    vmovnt.i32 q1, q3
+; CHECK-NEXT:    vmulh.u16 q0, q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
   %s0s = zext <8 x i16> %s0 to <8 x i32>
@@ -1235,25 +1171,17 @@ entry:
 define arm_aapcs_vfpcc <8 x i16> @vmulhs_kbc_v8i16(<8 x i16> %s0, <8 x i32> %s1) {
 ; CHECK-LABEL: vmulhs_kbc_v8i16:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    .vsave {d8, d9}
-; CHECK-NEXT:    vpush {d8, d9}
 ; CHECK-NEXT:    vmov.f32 s12, s5
-; CHECK-NEXT:    vmovlt.s16 q4, q0
 ; CHECK-NEXT:    vmov.f32 s13, s7
-; CHECK-NEXT:    vmovlb.s16 q0, q0
 ; CHECK-NEXT:    vmov.f32 s5, s6
 ; CHECK-NEXT:    vmov.f32 s14, s9
 ; CHECK-NEXT:    vmov.f32 s15, s11
 ; CHECK-NEXT:    vmov.f32 s6, s8
-; CHECK-NEXT:    vshr.s32 q3, q3, #16
-; CHECK-NEXT:    vmov.f32 s7, s10
-; CHECK-NEXT:    vmul.i32 q3, q3, q4
-; CHECK-NEXT:    vshr.s32 q1, q1, #16
 ; CHECK-NEXT:    vshr.u32 q3, q3, #16
-; CHECK-NEXT:    vmul.i32 q0, q1, q0
-; CHECK-NEXT:    vshr.u32 q0, q0, #16
-; CHECK-NEXT:    vmovnt.i32 q0, q3
-; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    vmov.f32 s7, s10
+; CHECK-NEXT:    vshr.u32 q1, q1, #16
+; CHECK-NEXT:    vmovnt.i32 q1, q3
+; CHECK-NEXT:    vmulh.s16 q0, q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
   %s0s = sext <8 x i16> %s0 to <8 x i32>
@@ -1267,25 +1195,17 @@ entry:
 define arm_aapcs_vfpcc <8 x i16> @vmulhu_kbc_v8i16(<8 x i16> %s0, <8 x i32> %s1) {
 ; CHECK-LABEL: vmulhu_kbc_v8i16:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    .vsave {d8, d9}
-; CHECK-NEXT:    vpush {d8, d9}
 ; CHECK-NEXT:    vmov.f32 s12, s5
-; CHECK-NEXT:    vmovlt.u16 q4, q0
 ; CHECK-NEXT:    vmov.f32 s13, s7
-; CHECK-NEXT:    vmovlb.u16 q0, q0
 ; CHECK-NEXT:    vmov.f32 s5, s6
 ; CHECK-NEXT:    vmov.f32 s14, s9
 ; CHECK-NEXT:    vmov.f32 s15, s11
 ; CHECK-NEXT:    vmov.f32 s6, s8
 ; CHECK-NEXT:    vshr.u32 q3, q3, #16
 ; CHECK-NEXT:    vmov.f32 s7, s10
-; CHECK-NEXT:    vmul.i32 q3, q3, q4
 ; CHECK-NEXT:    vshr.u32 q1, q1, #16
-; CHECK-NEXT:    vshr.u32 q3, q3, #16
-; CHECK-NEXT:    vmul.i32 q0, q1, q0
-; CHECK-NEXT:    vshr.u32 q0, q0, #16
-; CHECK-NEXT:    vmovnt.i32 q0, q3
-; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    vmovnt.i32 q1, q3
+; CHECK-NEXT:    vmulh.u16 q0, q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
   %s0s = zext <8 x i16> %s0 to <8 x i32>
diff --git a/llvm/test/CodeGen/X86/combine-pmuldq.ll b/llvm/test/CodeGen/X86/combine-pmuldq.ll
index 69c6d8d4cf2d6..5b279e5953629 100644
--- a/llvm/test/CodeGen/X86/combine-pmuldq.ll
+++ b/llvm/test/CodeGen/X86/combine-pmuldq.ll
@@ -381,22 +381,27 @@ define <8 x i32> @PR49658_zext(ptr %ptr, i32 %mul) {
 ; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[0,1,0,1]
 ; SSE-NEXT:    pxor %xmm0, %xmm0
 ; SSE-NEXT:    movq $-2097152, %rax # imm = 0xFFE00000
+; SSE-NEXT:    movdqa %xmm2, %xmm3
+; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[2,2],xmm2[2,2]
+; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,2,0,2]
 ; SSE-NEXT:    pxor %xmm1, %xmm1
 ; SSE-NEXT:    .p2align 4
 ; SSE-NEXT:  .LBB7_1: # %loop
 ; SSE-NEXT:    # =>This Inner Loop Header: Depth=1
-; SSE-NEXT:    pmovzxdq {{.*#+}} xmm3 = mem[0],zero,mem[1],zero
-; SSE-NEXT:    pmovzxdq {{.*#+}} xmm4 = mem[0],zero,mem[1],zero
-; SSE-NEXT:    pmovzxdq {{.*#+}} xmm5 = mem[0],zero,mem[1],zero
-; SSE-NEXT:    pmovzxdq {{.*#+}} xmm6 = mem[0],zero,mem[1],zero
-; SSE-NEXT:    pmuludq %xmm2, %xmm6
+; SSE-NEXT:    movdqu 2097152(%rdi,%rax), %xmm4
+; SSE-NEXT:    movdqu 2097168(%rdi,%rax), %xmm5
+; SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm5[1,1,3,3]
+; SSE-NEXT:    pmuludq %xmm3, %xmm6
 ; SSE-NEXT:    pmuludq %xmm2, %xmm5
-; SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[1,3],xmm6[1,3]
-; SSE-NEXT:    paddd %xmm5, %xmm0
+; SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
+; SSE-NEXT:    pblendw {{.*#+}} xmm5 = xmm5[0,1],xmm6[2,3],xmm5[4,5],xmm6[6,7]
+; SSE-NEXT:    paddd %xmm5, %xmm1
+; SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[1,1,3,3]
+; SSE-NEXT:    pmuludq %xmm3, %xmm5
 ; SSE-NEXT:    pmuludq %xmm2, %xmm4
-; SSE-NEXT:    pmuludq %xmm2, %xmm3
-; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[1,3],xmm3[1,3]
-; SSE-NEXT:    paddd %xmm4, %xmm1
+; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSE-NEXT:    pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2,3],xmm4[4,5],xmm5[6,7]
+; SSE-NEXT:    paddd %xmm4, %xmm0
 ; SSE-NEXT:    subq $-128, %rax
 ; SSE-NEXT:    jne .LBB7_1
 ; SSE-NEXT:  # %bb.2: # %end
@@ -407,27 +412,33 @@ define <8 x i32> @PR49658_zext(ptr %ptr, i32 %mul) {
 ; AVX1-NEXT:    movl %esi, %eax
 ; AVX1-NEXT:    vmovq %rax, %xmm0
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm4
 ; AVX1-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX1-NEXT:    movq $-2097152, %rax # imm = 0xFFE00000
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm4, %xmm5
+; AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm4[2,2],xmm5[2,2]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm2 = xmm4[0,2],xmm5[0,2]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm3 = xmm4[2,2],xmm5[2,2]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm4 = xmm4[0,2],xmm5[0,2]
 ; AVX1-NEXT:    .p2align 4
 ; AVX1-NEXT:  .LBB7_1: # %loop
 ; AVX1-NEXT:    # =>This Inner Loop Header: Depth=1
-; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm3 = mem[0],zero,mem[1],zero
-; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm4 = mem[0],zero,mem[1],zero
-; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm5 = mem[0],zero,mem[1],zero
-; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm6 = mem[0],zero,mem[1],zero
-; AVX1-NEXT:    vpmuludq %xmm6, %xmm2, %xmm6
-; AVX1-NEXT:    vpmuludq %xmm5, %xmm1, %xmm5
-; AVX1-NEXT:    vshufps {{.*#+}} xmm5 = xmm5[1,3],xmm6[1,3]
-; AVX1-NEXT:    vpmuludq %xmm4, %xmm2, %xmm4
-; AVX1-NEXT:    vpmuludq %xmm3, %xmm1, %xmm3
-; AVX1-NEXT:    vshufps {{.*#+}} xmm3 = xmm3[1,3],xmm4[1,3]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
-; AVX1-NEXT:    vpaddd %xmm4, %xmm5, %xmm4
-; AVX1-NEXT:    vpaddd %xmm0, %xmm3, %xmm0
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm0
+; AVX1-NEXT:    vmovdqu 2097152(%rdi,%rax), %xmm5
+; AVX1-NEXT:    vmovdqu 2097168(%rdi,%rax), %xmm6
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm7 = xmm5[1,1,3,3]
+; AVX1-NEXT:    vpmuludq %xmm1, %xmm7, %xmm7
+; AVX1-NEXT:    vpmuludq %xmm2, %xmm5, %xmm5
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1],xmm7[2,3],xmm5[4,5],xmm7[6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm7 = xmm6[1,1,3,3]
+; AVX1-NEXT:    vpmuludq %xmm3, %xmm7, %xmm7
+; AVX1-NEXT:    vpmuludq %xmm4, %xmm6, %xmm6
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1],xmm7[2,3],xmm6[4,5],xmm7[6,7]
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm7
+; AVX1-NEXT:    vpaddd %xmm7, %xmm6, %xmm6
+; AVX1-NEXT:    vpaddd %xmm0, %xmm5, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm0, %ymm0
 ; AVX1-NEXT:    subq $-128, %rax
 ; AVX1-NEXT:    jne .LBB7_1
 ; AVX1-NEXT:  # %bb.2: # %end
@@ -440,16 +451,19 @@ define <8 x i32> @PR49658_zext(ptr %ptr, i32 %mul) {
 ; AVX2-NEXT:    vpbroadcastq %xmm0, %ymm1
 ; AVX2-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX2-NEXT:    movq $-2097152, %rax # imm = 0xFFE00000
+; AVX2-NEXT:    vshufps {{.*#+}} ymm1 = ymm1[0,2,0,2,4,6,4,6]
+; AVX2-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,1,3]
+; AVX2-NEXT:    vshufps {{.*#+}} ymm2 = ymm1[1,1,3,3,5,5,7,7]
 ; AVX2-NEXT:    .p2align 4
 ; AVX2-NEXT:  .LBB7_1: # %loop
 ; AVX2-NEXT:    # =>This Inner Loop Header: Depth=1
-; AVX2-NEXT:    vpmovzxdq {{.*#+}} ymm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
-; AVX2-NEXT:    vpmovzxdq {{.*#+}} ymm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
-; AVX2-NEXT:    vpmuludq %ymm3, %ymm1, %ymm3
-; AVX2-NEXT:    vpmuludq %ymm2, %ymm1, %ymm2
-; AVX2-NEXT:    vshufps {{.*#+}} ymm2 = ymm2[1,3],ymm3[1,3],ymm2[5,7],ymm3[5,7]
-; AVX2-NEXT:    vpermpd {{.*#+}} ymm2 = ymm2[0,2,1,3]
-; AVX2-NEXT:    vpaddd %ymm0, %ymm2, %ymm0
+; AVX2-NEXT:    vmovdqu 2097152(%rdi,%rax), %ymm3
+; AVX2-NEXT:    vpshufd {{.*#+}} ymm4 = ymm3[1,1,3,3,5,5,7,7]
+; AVX2-NEXT:    vpmuludq %ymm2, %ymm4, %ymm4
+; AVX2-NEXT:    vpmuludq %ymm1, %ymm3, %ymm3
+; AVX2-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[1,1,3,3,5,5,7,7]
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2],ymm4[3],ymm3[4],ymm4[5],ymm3[6],ymm4[7]
+; AVX2-NEXT:    vpaddd %ymm0, %ymm3, %ymm0
 ; AVX2-NEXT:    subq $-128, %rax
 ; AVX2-NEXT:    jne .LBB7_1
 ; AVX2-NEXT:  # %bb.2: # %end
@@ -461,14 +475,18 @@ define <8 x i32> @PR49658_zext(ptr %ptr, i32 %mul) {
 ; AVX512VL-NEXT:    vpbroadcastq %rax, %zmm1
 ; AVX512VL-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX512VL-NEXT:    movq $-2097152, %rax # imm = 0xFFE00000
+; AVX512VL-NEXT:    vpmovqd %zmm1, %ymm1
+; AVX512VL-NEXT:    vpshufd {{.*#+}} ymm2 = ymm1[1,1,3,3,5,5,7,7]
 ; AVX512VL-NEXT:    .p2align 4
 ; AVX512VL-NEXT:  .LBB7_1: # %loop
 ; AVX512VL-NEXT:    # =>This Inner Loop Header: Depth=1
-; AVX512VL-NEXT:    vpmovzxdq {{.*#+}} zmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
-; AVX512VL-NEXT:    vpmuludq %zmm2, %zmm1, %zmm2
-; AVX512VL-NEXT:    vpsrlq $32, %zmm2, %zmm2
-; AVX512VL-NEXT:    vpmovqd %zmm2, %ymm2
-; AVX512VL-NEXT:    vpaddd %ymm0, %ymm2, %ymm0
+; AVX512VL-NEXT:    vmovdqu 2097152(%rdi,%rax), %ymm3
+; AVX512VL-NEXT:    vpshufd {{.*#+}} ymm4 = ymm3[1,1,3,3,5,5,7,7]
+; AVX512VL-NEXT:    vpmuludq %ymm2, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpmuludq %ymm1, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[1,1,3,3,5,5,7,7]
+; AVX512VL-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2],ymm4[3],ymm3[4],ymm4[5],ymm3[6],ymm4[7]
+; AVX512VL-NEXT:    vpaddd %ymm0, %ymm3, %ymm0
 ; AVX512VL-NEXT:    subq $-128, %rax
 ; AVX512VL-NEXT:    jne .LBB7_1
 ; AVX512VL-NEXT:  # %bb.2: # %end
@@ -480,14 +498,18 @@ define <8 x i32> @PR49658_zext(ptr %ptr, i32 %mul) {
 ; AVX512DQVL-NEXT:    vpbroadcastq %rax, %zmm1
 ; AVX512DQVL-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX512DQVL-NEXT:    movq $-2097152, %rax # imm = 0xFFE00000
+; AVX512DQVL-NEXT:    vpmovqd %zmm1, %ymm1
+; AVX512DQVL-NEXT:    vpshufd {{.*#+}} ymm2 = ymm1[1,1,3,3,5,5,7,7]
 ; AVX512DQVL-NEXT:    .p2align 4
 ; AVX512DQVL-NEXT:  .LBB7_1: # %loop
 ; AVX512DQVL-NEXT:    # =>This Inner Loop Header: Depth=1
-; AVX512DQVL-NEXT:    vpmovzxdq {{.*#+}} zmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
-; AVX512DQVL-NEXT:    vpmuludq %zmm2, %zmm1, %zmm2
-; AVX512DQVL-NEXT:    vpsrlq $32, %zmm2, %zmm2
-; AVX512DQVL-NEXT:    vpmovqd %zmm2, %ymm2
-; AVX512DQVL-NEXT:    vpaddd %ymm0, %ymm2, %ymm0
+; AVX512DQVL-NEXT:    vmovdqu 2097152(%rdi,%rax), %ymm3
+; AVX512DQVL-NEXT:    vpshufd {{.*#+}} ymm4 = ymm3[1,1,3,3,5,5,7,7]
+; AVX512DQVL-NEXT:    vpmuludq %ymm2, %ymm4, %ymm4
+; AVX512DQVL-NEXT:    vpmuludq %ymm1, %ymm3, %ymm3
+; AVX512DQVL-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[1,1,3,3,5,5,7,7]
+; AVX512DQVL-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2],ymm4[3],ymm3[4],ymm4[5],ymm3[6],ymm4[7]
+; AVX512DQVL-NEXT:    vpaddd %ymm0, %ymm3, %ymm0
 ; AVX512DQVL-NEXT:    subq $-128, %rax
 ; AVX512DQVL-NEXT:    jne .LBB7_1
 ; AVX512DQVL-NEXT:  # %bb.2: # %end
@@ -582,27 +604,33 @@ define <8 x i32> @PR49658_sext(ptr %ptr, i32 %mul) {
 ; AVX1-NEXT:    movslq %esi, %rax
 ; AVX1-NEXT:    vmovq %rax, %xmm0
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm4
 ; AVX1-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX1-NEXT:    movq $-2097152, %rax # imm = 0xFFE00000
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm4, %xmm5
+; AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm4[2,2],xmm5[2,2]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm2 = xmm4[0,2],xmm5[0,2]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm3 = xmm4[2,2],xmm5[2,2]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm4 = xmm4[0,2],xmm5[0,2]
 ; AVX1-NEXT:    .p2align 4
 ; AVX1-NEXT:  .LBB8_1: # %loop
 ; AVX1-NEXT:    # =>This Inner Loop Header: Depth=1
-; AVX1-NEXT:    vpmovsxdq 2097152(%rdi,%rax), %xmm3
-; AVX1-NEXT:    vpmovsxdq 2097160(%rdi,%rax), %xmm4
-; AVX1-NEXT:    vpmovsxdq 2097168(%rdi,%rax), %xmm5
-; AVX1-NEXT:    vpmovsxdq 2097176(%rdi,%rax), %xmm6
-; AVX1-NEXT:    vpmuldq %xmm6, %xmm2, %xmm6
-; AVX1-NEXT:    vpmuldq %xmm5, %xmm1, %xmm5
-; AVX1-NEXT:    vshufps {{.*#+}} xmm5 = xmm5[1,3],xmm6[1,3]
-; AVX1-NEXT:    vpmuldq %xmm4, %xmm2, %xmm4
-; AVX1-NEXT:    vpmuldq %xmm3, %xmm1, %xmm3
-; AVX1-NEXT:    vshufps {{.*#+}} xmm3 = xmm3[1,3],xmm4[1,3]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
-; AVX1-NEXT:    vpaddd %xmm4, %xmm5, %xmm4
-; AVX1-NEXT:    vpaddd %xmm0, %xmm3, %xmm0
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm0
+; AVX1-NEXT:    vmovdqu 2097152(%rdi,%rax), %xmm5
+; AVX1-NEXT:    vmovdqu 2097168(%rdi,%rax), %xmm6
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm7 = xmm5[1,1,3,3]
+; AVX1-NEXT:    vpmuldq %xmm1, %xmm7, %xmm7
+; AVX1-NEXT:    vpmuldq %xmm2, %xmm5, %xmm5
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1],xmm7[2,3],xmm5[4,5],xmm7[6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm7 = xmm6[1,1,3,3]
+; AVX1-NEXT:    vpmuldq %xmm3, %xmm7, %xmm7
+; AVX1-NEXT:    vpmuldq %xmm4, %xmm6, %xmm6
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1],xmm7[2,3],xmm6[4,5],xmm7[6,7]
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm7
+; AVX1-NEXT:    vpaddd %xmm7, %xmm6, %xmm6
+; AVX1-NEXT:    vpaddd %xmm0, %xmm5, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm0, %ymm0
 ; AVX1-NEXT:    subq $-128, %rax
 ; AVX1-NEXT:    jne .LBB8_1
 ; AVX1-NEXT:  # %bb.2: # %end
@@ -615,16 +643,19 @@ define <8 x i32> @PR49658_sext(ptr %ptr, i32 %mul) {
 ; AVX2-NEXT:    vpbroadcastq %xmm0, %ymm1
 ; AVX2-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX2-NEXT:    movq $-2097152, %rax # imm = 0xFFE00000
+; AVX2-NEXT:    vshufps {{.*#+}} ymm1 = ymm1[0,2,0,2,4,6,4,6]
+; AVX2-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,1,3]
+; AVX2-NEXT:    vshufps {{.*#+}} ymm2 = ymm1[1,1,3,3,5,5,7,7]
 ; AVX2-NEXT:    .p2align 4
 ; AVX2-NEXT:  .LBB8_1: # %loop
 ; AVX2-NEXT:    # =>This Inner Loop Header: Depth=1
-; AVX2-NEXT:    vpmovsxdq 2097152(%rdi,%rax), %ymm2
-; AVX2-NEXT:    vpmovsxdq 2097168(%rdi,%rax), %ymm3
-; AVX2-NEXT:    vpmuldq %ymm3, %ymm1, %ymm3
-; AVX2-NEXT:    vpmuldq %ymm2, %ymm1, %ymm2
-; AVX2-NEXT:    vshufps {{.*#+}} ymm2 = ymm2[1,3],ymm3[1,3],ymm2[5,7],ymm3[5,7]
-; AVX2-NEXT:    vpermpd {{.*#+}} ymm2 = ymm2[0,2,1,3]
-; AVX2-NEXT:    vpaddd %ymm0, %ymm2, %ymm0
+; AVX2-NEXT:    vmovdqu 2097152(%rdi,%rax), %ymm3
+; AVX2-NEXT:    vpshufd {{.*#+}} ymm4 = ymm3[1,1,3,3,5,5,7,7]
+; AVX2-NEXT:    vpmuldq %ymm2, %ymm4, %ymm4
+; AVX2-NEXT:    vpmuldq %ymm1, %ymm3, %ymm3
+; AVX2-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[1,1,3,3,5,5,7,7]
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2],ymm4[3],ymm3[4],ymm4[5],ymm3[6],ymm4[7]
+; AVX2-NEXT:    vpaddd %ymm0, %ymm3, %ymm0
 ; AVX2-NEXT:    subq $-128, %rax
 ; AVX2-NEXT:    jne .LBB8_1
 ; AVX2-NEXT:  # %bb.2: # %end
@@ -636,14 +667,18 @@ define <8 x i32> @PR49658_sext(ptr %ptr, i32 %mul) {
 ; AVX512VL-NEXT:    vpbroadcastq %rax, %zmm1
 ; AVX512VL-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX512VL-NEXT:    movq $-2097152, %rax # imm = 0xFFE00000
+; AVX512VL-NEXT:    vpmovqd %zmm1, %ymm1
+; AVX512VL-NEXT:    vpshufd {{.*#+}} ymm2 = ymm1[1,1,3,3,5,5,7,7]
 ; AVX512VL-NEXT:    .p2align 4
 ; AVX512VL-NEXT:  .LBB8_1: # %loop
 ; AVX512VL-NEXT:    # =>This Inner Loop Header: Depth=1
-; AVX512VL-NEXT:    vpmovzxdq {{.*#+}} zmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
-; AVX512VL-NEXT:    vpmuldq %zmm2, %zmm1, %zmm2
-; AVX512VL-NEXT:    vpsrlq $32, %zmm2, %zmm2
-; AVX512VL-NEXT:    vpmovqd %zmm2, %ymm2
-; AVX512VL-NEXT:    vpaddd %ymm0, %ymm2, %ymm0
+; AVX512VL-NEXT:    vmovdqu 2097152(%rdi,%rax), %ymm3
+; AVX512VL-NEXT:    vpshufd {{.*#+}} ymm4 = ymm3[1,1,3,3,5,5,7,7]
+; AVX512VL-NEXT:    vpmuldq %ymm2, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpmuldq %ymm1, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[1,1,3,3,5,5,7,7]
+; AVX512VL-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2],ymm4[3],ymm3[4],ymm4[5],ymm3[6],ymm4[7]
+; AVX512VL-NEXT:    vpaddd %ymm0, %ymm3, %ymm0
 ; AVX512VL-NEXT:    subq $-128, %rax
 ; AVX512VL-NEXT:    jne .LBB8_1
 ; AVX512VL-NEXT:  # %bb.2: # %end
@@ -655,14 +690,18 @@ define <8 x i32> @PR49658_sext(ptr %ptr, i32 %mul) {
 ; AVX512DQVL-NEXT:    vpbroadcastq %rax, %zmm1
 ; AVX512DQVL-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX512DQVL-NEXT:    movq $-2097152, %rax # imm = 0xFFE00000
+; AVX512DQVL-NEXT:    vpmovqd %zmm1, %ymm1
+; AVX512DQVL-NEXT:    vpshufd {{.*#+}} ymm2 = ymm1[1,1,3,3,5,5,7,7]
 ; AVX512DQVL-NEXT:    .p2align 4
 ; AVX512DQVL-NEXT:  .LBB8_1: # %loop
 ; AVX512DQVL-NEXT:    # =>This Inner Loop Header: Depth=1
-; AVX512DQVL-NEXT:    vpmovzxdq {{.*#+}} zmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
-; AVX512DQVL-NEXT:    vpmuldq %zmm2, %zmm1, %zmm2
-; AVX512DQVL-NEXT:    vpsrlq $32, %zmm2, %zmm2
-; AVX512DQVL-NEXT:    vpmovqd %zmm2, %ymm2
-; AVX512DQVL-NEXT:    vpaddd %ymm0, %ymm2, %ymm0
+; AVX512DQVL-NEXT:    vmovdqu 2097152(%rdi,%rax), %ymm3
+; AVX512DQVL-NEXT:    vpshufd {{.*#+}} ymm4 = ymm3[1,1,3,3,5,5,7,7]
+; AVX512DQVL-NEXT:    vpmuldq %ymm2, %ymm4, %ymm4
+; AVX512DQVL-NEXT:    vpmuldq %ymm1, %ymm3, %ymm3
+; AVX512DQVL-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[1,1,3,3,5,5,7,7]
+; AVX512DQVL-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2],ymm4[3],ymm3[4],ymm4[5],ymm3[6],ymm4[7]
+; AVX512DQVL-NEXT:    vpaddd %ymm0, %ymm3, %ymm0
 ; AVX512DQVL-NEXT:    subq $-128, %rax
 ; AVX512DQVL-NEXT:    jne .LBB8_1
 ; AVX512DQVL-NEXT:  # %bb.2: # %end



More information about the llvm-commits mailing list