[llvm] eea53b1 - [DAGCombiner] Optimize SMULO/UMULO if we can prove that overflow is impossible.
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Fri Feb 26 15:24:53 PST 2021
Author: Craig Topper
Date: 2021-02-26T14:50:03-08:00
New Revision: eea53b142d91c6dc8aae2a9727b4f48508d7b147
URL: https://github.com/llvm/llvm-project/commit/eea53b142d91c6dc8aae2a9727b4f48508d7b147
DIFF: https://github.com/llvm/llvm-project/commit/eea53b142d91c6dc8aae2a9727b4f48508d7b147.diff
LOG: [DAGCombiner] Optimize SMULO/UMULO if we can prove that overflow is impossible.
Using ComputeNumSignBits or computeKnownBits, we might be able
to determine that overflow is impossible.
This especially helps after type legalization if the type was
promoted from a type with half the bits or more. Type legalization
conservatively creates a promoted smulo/umulo plus an overflow
check for the promoted bits. The overflow flag from the promoted
smulo/umulo is ORed with the result of the promoted-bits overflow
check. Proving that the promoted smulo/umulo can never overflow
leaves us with just the promoted-bits overflow check.
Reviewed By: RKSimon
Differential Revision: https://reviews.llvm.org/D97160
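[Editorial illustration] To make the signed condition in the patch
concrete, here is a minimal standalone C++ sketch of the sign-bit
arithmetic. The helper name smulNeverOverflows and the hard-coded
bit counts are hypothetical, not part of the patch; the condition
itself mirrors the DAGCombiner code below, and the first assert
corresponds to the RV64 smulo.i32 case in the xaluo.ll diff.

#include <cassert>

// Stand-in for DAG.ComputeNumSignBits-based reasoning: e.g. an i64
// produced by sign-extending an i32 has at least 33 known sign bits.
bool smulNeverOverflows(unsigned SignBits0, unsigned SignBits1,
                        unsigned BitWidth) {
  // An operand with s sign bits occupies BitWidth - s + 1 significant
  // bits, and the product of n- and m-significant-bit values fits in
  // n + m bits, so SignBits0 + SignBits1 > BitWidth + 1 rules out
  // signed overflow of the BitWidth-wide multiply.
  return SignBits0 + SignBits1 > BitWidth + 1;
}

int main() {
  // i32 smulo promoted to i64 (e.g. on RV64): both operands come from
  // sext.w, so each has >= 33 sign bits and 33 + 33 > 64 + 1 holds;
  // the promoted 64-bit multiply can never overflow.
  assert(smulNeverOverflows(33, 33, 64));
  // Two arbitrary i64 operands (1 known sign bit each) prove nothing.
  assert(!smulNeverOverflows(1, 1, 64));
  return 0;
}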
Added:
Modified:
llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
llvm/test/CodeGen/AArch64/vec_umulo.ll
llvm/test/CodeGen/RISCV/xaluo.ll
llvm/test/CodeGen/X86/vec_smulo.ll
llvm/test/CodeGen/X86/vec_umulo.ll
Removed:
################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 397b8f85b3d2..686c7a47b352 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -4660,6 +4660,28 @@ SDValue DAGCombiner::visitMULO(SDNode *N) {
return DAG.getNode(IsSigned ? ISD::SADDO : ISD::UADDO, DL,
N->getVTList(), N0, N0);
+ if (IsSigned) {
+ // Multiplying n * m significant bits yields a result of n + m significant
+ // bits. If the total number of significant bits does not exceed the
+ // result bit width (minus 1), there is no overflow.
+ unsigned SignBits = DAG.ComputeNumSignBits(N0);
+ if (SignBits > 1)
+ SignBits += DAG.ComputeNumSignBits(N1);
+ if (SignBits > VT.getScalarSizeInBits() + 1)
+ return CombineTo(N, DAG.getNode(ISD::MUL, DL, VT, N0, N1),
+ DAG.getConstant(0, DL, CarryVT));
+ } else {
+ KnownBits N1Known = DAG.computeKnownBits(N1);
+ if (N1Known.Zero.getBoolValue()) {
+ KnownBits N0Known = DAG.computeKnownBits(N0);
+ bool Overflow;
+ (void)N0Known.getMaxValue().umul_ov(N1Known.getMaxValue(), Overflow);
+ if (!Overflow)
+ return CombineTo(N, DAG.getNode(ISD::MUL, DL, VT, N0, N1),
+ DAG.getConstant(0, DL, CarryVT));
+ }
+ }
+
return SDValue();
}
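[Editorial illustration] The unsigned branch above boils down to
multiplying the operands' known maximum values and asking APInt
whether that multiply wraps. A minimal sketch, assuming LLVM's ADT
and Support headers (the helper name umulNeverOverflows is
hypothetical):

#include "llvm/ADT/APInt.h"
#include "llvm/Support/KnownBits.h"

// LHS/RHS play the role of DAG.computeKnownBits(N0)/(N1);
// getMaxValue() treats every bit not known to be zero as set.
static bool umulNeverOverflows(const llvm::KnownBits &LHS,
                               const llvm::KnownBits &RHS) {
  bool Overflow;
  // If even the two maximum possible values multiply without
  // wrapping, no pair of actual runtime values can overflow.
  (void)LHS.getMaxValue().umul_ov(RHS.getMaxValue(), Overflow);
  return !Overflow;
}

The N1Known.Zero.getBoolValue() guard in the patch appears to be a
cheap early-out: if no bit of N1 is known zero, its maximum value is
all-ones and the product check could only succeed for a trivial N0,
so computing N0's known bits would be wasted work.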
diff --git a/llvm/test/CodeGen/AArch64/vec_umulo.ll b/llvm/test/CodeGen/AArch64/vec_umulo.ll
index 880a5926b443..c84c76f7f88d 100644
--- a/llvm/test/CodeGen/AArch64/vec_umulo.ll
+++ b/llvm/test/CodeGen/AArch64/vec_umulo.ll
@@ -294,21 +294,17 @@ define <4 x i32> @umulo_v4i1(<4 x i1> %a0, <4 x i1> %a1, <4 x i1>* %p2) nounwind
; CHECK-NEXT: movi v2.4h, #1
; CHECK-NEXT: and v1.8b, v1.8b, v2.8b
; CHECK-NEXT: and v0.8b, v0.8b, v2.8b
-; CHECK-NEXT: umull v0.4s, v0.4h, v1.4h
-; CHECK-NEXT: xtn v2.4h, v0.4s
-; CHECK-NEXT: umov w9, v2.h[1]
-; CHECK-NEXT: umov w8, v2.h[0]
+; CHECK-NEXT: mul v1.4h, v0.4h, v1.4h
+; CHECK-NEXT: umov w9, v1.h[1]
+; CHECK-NEXT: umov w8, v1.h[0]
; CHECK-NEXT: and w9, w9, #0x1
-; CHECK-NEXT: shrn v1.4h, v0.4s, #16
; CHECK-NEXT: bfi w8, w9, #1, #1
-; CHECK-NEXT: umov w9, v2.h[2]
-; CHECK-NEXT: cmeq v0.4h, v1.4h, #0
-; CHECK-NEXT: ushr v1.4h, v2.4h, #1
+; CHECK-NEXT: umov w9, v1.h[2]
; CHECK-NEXT: and w9, w9, #0x1
-; CHECK-NEXT: cmtst v1.4h, v1.4h, v1.4h
+; CHECK-NEXT: ushr v0.4h, v1.4h, #1
; CHECK-NEXT: bfi w8, w9, #2, #1
-; CHECK-NEXT: umov w9, v2.h[3]
-; CHECK-NEXT: orn v0.8b, v1.8b, v0.8b
+; CHECK-NEXT: umov w9, v1.h[3]
+; CHECK-NEXT: cmtst v0.4h, v0.4h, v0.4h
; CHECK-NEXT: bfi w8, w9, #3, #29
; CHECK-NEXT: sshll v0.4s, v0.4h, #0
; CHECK-NEXT: and w8, w8, #0xf
diff --git a/llvm/test/CodeGen/RISCV/xaluo.ll b/llvm/test/CodeGen/RISCV/xaluo.ll
index e7772026fce1..7d8b3854a51e 100644
--- a/llvm/test/CodeGen/RISCV/xaluo.ll
+++ b/llvm/test/CodeGen/RISCV/xaluo.ll
@@ -425,12 +425,8 @@ define zeroext i1 @smulo.i32(i32 %v1, i32 %v2, i32* %res) {
; RV64-NEXT: sext.w a1, a1
; RV64-NEXT: sext.w a0, a0
; RV64-NEXT: mul a3, a0, a1
-; RV64-NEXT: mulw a4, a0, a1
-; RV64-NEXT: xor a4, a4, a3
-; RV64-NEXT: mulh a0, a0, a1
-; RV64-NEXT: srai a1, a3, 63
-; RV64-NEXT: xor a0, a0, a1
-; RV64-NEXT: or a0, a4, a0
+; RV64-NEXT: mulw a0, a0, a1
+; RV64-NEXT: xor a0, a0, a3
; RV64-NEXT: snez a0, a0
; RV64-NEXT: sw a3, 0(a2)
; RV64-NEXT: ret
@@ -459,12 +455,8 @@ define zeroext i1 @smulo2.i32(i32 %v1, i32* %res) {
; RV64-NEXT: sext.w a0, a0
; RV64-NEXT: addi a2, zero, 13
; RV64-NEXT: mul a3, a0, a2
-; RV64-NEXT: mulw a4, a0, a2
-; RV64-NEXT: xor a4, a4, a3
-; RV64-NEXT: mulh a0, a0, a2
-; RV64-NEXT: srai a2, a3, 63
-; RV64-NEXT: xor a0, a0, a2
-; RV64-NEXT: or a0, a4, a0
+; RV64-NEXT: mulw a0, a0, a2
+; RV64-NEXT: xor a0, a0, a3
; RV64-NEXT: snez a0, a0
; RV64-NEXT: sw a3, 0(a1)
; RV64-NEXT: ret
@@ -575,10 +567,8 @@ define zeroext i1 @umulo.i32(i32 %v1, i32 %v2, i32* %res) {
; RV64-NEXT: srli a1, a1, 32
; RV64-NEXT: slli a0, a0, 32
; RV64-NEXT: srli a0, a0, 32
-; RV64-NEXT: mulhu a3, a0, a1
; RV64-NEXT: mul a1, a0, a1
; RV64-NEXT: srli a0, a1, 32
-; RV64-NEXT: or a0, a0, a3
; RV64-NEXT: snez a0, a0
; RV64-NEXT: sw a1, 0(a2)
; RV64-NEXT: ret
@@ -606,10 +596,8 @@ define zeroext i1 @umulo2.i32(i32 %v1, i32* %res) {
; RV64-NEXT: slli a0, a0, 32
; RV64-NEXT: srli a0, a0, 32
; RV64-NEXT: addi a2, zero, 13
-; RV64-NEXT: mulhu a3, a0, a2
; RV64-NEXT: mul a2, a0, a2
; RV64-NEXT: srli a0, a2, 32
-; RV64-NEXT: or a0, a0, a3
; RV64-NEXT: snez a0, a0
; RV64-NEXT: sw a2, 0(a1)
; RV64-NEXT: ret
@@ -1209,14 +1197,8 @@ define i32 @smulo.select.i32(i32 %v1, i32 %v2) {
; RV64-NEXT: sext.w a2, a1
; RV64-NEXT: sext.w a3, a0
; RV64-NEXT: mul a4, a3, a2
-; RV64-NEXT: mulw a5, a3, a2
-; RV64-NEXT: xor a5, a5, a4
-; RV64-NEXT: mulh a2, a3, a2
-; RV64-NEXT: srai a3, a4, 63
-; RV64-NEXT: xor a2, a2, a3
-; RV64-NEXT: or a2, a5, a2
-; RV64-NEXT: snez a2, a2
-; RV64-NEXT: bnez a2, .LBB38_2
+; RV64-NEXT: mulw a2, a3, a2
+; RV64-NEXT: bne a2, a4, .LBB38_2
; RV64-NEXT: # %bb.1: # %entry
; RV64-NEXT: mv a0, a1
; RV64-NEXT: .LBB38_2: # %entry
@@ -1243,12 +1225,8 @@ define i1 @smulo.not.i32(i32 %v1, i32 %v2) {
; RV64-NEXT: sext.w a1, a1
; RV64-NEXT: sext.w a0, a0
; RV64-NEXT: mul a2, a0, a1
-; RV64-NEXT: mulw a3, a0, a1
-; RV64-NEXT: xor a3, a3, a2
-; RV64-NEXT: mulh a0, a0, a1
-; RV64-NEXT: srai a1, a2, 63
-; RV64-NEXT: xor a0, a0, a1
-; RV64-NEXT: or a0, a3, a0
+; RV64-NEXT: mulw a0, a0, a1
+; RV64-NEXT: xor a0, a0, a2
; RV64-NEXT: seqz a0, a0
; RV64-NEXT: ret
entry:
@@ -1363,11 +1341,8 @@ define i32 @umulo.select.i32(i32 %v1, i32 %v2) {
; RV64-NEXT: srli a2, a2, 32
; RV64-NEXT: slli a3, a0, 32
; RV64-NEXT: srli a3, a3, 32
-; RV64-NEXT: mulhu a4, a3, a2
; RV64-NEXT: mul a2, a3, a2
; RV64-NEXT: srli a2, a2, 32
-; RV64-NEXT: or a2, a2, a4
-; RV64-NEXT: snez a2, a2
; RV64-NEXT: bnez a2, .LBB42_2
; RV64-NEXT: # %bb.1: # %entry
; RV64-NEXT: mv a0, a1
@@ -1393,10 +1368,8 @@ define i1 @umulo.not.i32(i32 %v1, i32 %v2) {
; RV64-NEXT: srli a1, a1, 32
; RV64-NEXT: slli a0, a0, 32
; RV64-NEXT: srli a0, a0, 32
-; RV64-NEXT: mulhu a2, a0, a1
; RV64-NEXT: mul a0, a0, a1
; RV64-NEXT: srli a0, a0, 32
-; RV64-NEXT: or a0, a0, a2
; RV64-NEXT: seqz a0, a0
; RV64-NEXT: ret
entry:
@@ -1843,13 +1816,8 @@ define zeroext i1 @smulo.br.i32(i32 %v1, i32 %v2) {
; RV64-NEXT: sext.w a1, a1
; RV64-NEXT: sext.w a0, a0
; RV64-NEXT: mul a2, a0, a1
-; RV64-NEXT: mulw a3, a0, a1
-; RV64-NEXT: xor a3, a3, a2
-; RV64-NEXT: mulh a0, a0, a1
-; RV64-NEXT: srai a1, a2, 63
-; RV64-NEXT: xor a0, a0, a1
-; RV64-NEXT: or a0, a3, a0
-; RV64-NEXT: beqz a0, .LBB54_2
+; RV64-NEXT: mulw a0, a0, a1
+; RV64-NEXT: beq a0, a2, .LBB54_2
; RV64-NEXT: # %bb.1: # %overflow
; RV64-NEXT: mv a0, zero
; RV64-NEXT: ret
@@ -1984,10 +1952,8 @@ define zeroext i1 @umulo.br.i32(i32 %v1, i32 %v2) {
; RV64-NEXT: srli a1, a1, 32
; RV64-NEXT: slli a0, a0, 32
; RV64-NEXT: srli a0, a0, 32
-; RV64-NEXT: mulhu a2, a0, a1
; RV64-NEXT: mul a0, a0, a1
; RV64-NEXT: srli a0, a0, 32
-; RV64-NEXT: or a0, a0, a2
; RV64-NEXT: beqz a0, .LBB57_2
; RV64-NEXT: # %bb.1: # %overflow
; RV64-NEXT: mv a0, zero
diff --git a/llvm/test/CodeGen/X86/vec_smulo.ll b/llvm/test/CodeGen/X86/vec_smulo.ll
index 0941810ccb30..53fb333143d7 100644
--- a/llvm/test/CodeGen/X86/vec_smulo.ll
+++ b/llvm/test/CodeGen/X86/vec_smulo.ll
@@ -3371,223 +3371,154 @@ define <4 x i32> @smulo_v4i24(<4 x i24> %a0, <4 x i24> %a1, <4 x i24>* %p2) noun
define <4 x i32> @smulo_v4i1(<4 x i1> %a0, <4 x i1> %a1, <4 x i1>* %p2) nounwind {
; SSE2-LABEL: smulo_v4i1:
; SSE2: # %bb.0:
-; SSE2-NEXT: pslld $31, %xmm0
-; SSE2-NEXT: psrad $31, %xmm0
; SSE2-NEXT: pslld $31, %xmm1
; SSE2-NEXT: psrad $31, %xmm1
-; SSE2-NEXT: pxor %xmm2, %xmm2
-; SSE2-NEXT: pxor %xmm3, %xmm3
-; SSE2-NEXT: pcmpgtd %xmm1, %xmm3
-; SSE2-NEXT: pand %xmm0, %xmm3
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm2
-; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: paddd %xmm3, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; SSE2-NEXT: pmuludq %xmm1, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,3,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE2-NEXT: pmuludq %xmm3, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
-; SSE2-NEXT: psubd %xmm2, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,2,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
-; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: pslld $31, %xmm0
-; SSE2-NEXT: movmskps %xmm0, %eax
; SSE2-NEXT: psrad $31, %xmm0
-; SSE2-NEXT: pcmpeqd %xmm2, %xmm0
-; SSE2-NEXT: psrad $31, %xmm2
-; SSE2-NEXT: pcmpeqd %xmm4, %xmm2
-; SSE2-NEXT: pcmpeqd %xmm1, %xmm1
-; SSE2-NEXT: pxor %xmm1, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; SSE2-NEXT: pmuludq %xmm1, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT: pmuludq %xmm2, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: pslld $31, %xmm1
+; SSE2-NEXT: movmskps %xmm1, %eax
+; SSE2-NEXT: psrad $31, %xmm1
+; SSE2-NEXT: pcmpeqd %xmm0, %xmm1
+; SSE2-NEXT: pcmpeqd %xmm0, %xmm0
; SSE2-NEXT: pxor %xmm1, %xmm0
-; SSE2-NEXT: por %xmm2, %xmm0
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: retq
;
; SSSE3-LABEL: smulo_v4i1:
; SSSE3: # %bb.0:
-; SSSE3-NEXT: pslld $31, %xmm0
-; SSSE3-NEXT: psrad $31, %xmm0
; SSSE3-NEXT: pslld $31, %xmm1
; SSSE3-NEXT: psrad $31, %xmm1
-; SSSE3-NEXT: pxor %xmm2, %xmm2
-; SSSE3-NEXT: pxor %xmm3, %xmm3
-; SSSE3-NEXT: pcmpgtd %xmm1, %xmm3
-; SSSE3-NEXT: pand %xmm0, %xmm3
-; SSSE3-NEXT: pcmpgtd %xmm0, %xmm2
-; SSSE3-NEXT: pand %xmm1, %xmm2
-; SSSE3-NEXT: paddd %xmm3, %xmm2
-; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; SSSE3-NEXT: pmuludq %xmm1, %xmm0
-; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,3,2,3]
-; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSSE3-NEXT: pmuludq %xmm3, %xmm1
-; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3]
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
-; SSSE3-NEXT: psubd %xmm2, %xmm4
-; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,2,2,3]
-; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
-; SSSE3-NEXT: movdqa %xmm2, %xmm0
; SSSE3-NEXT: pslld $31, %xmm0
-; SSSE3-NEXT: movmskps %xmm0, %eax
; SSSE3-NEXT: psrad $31, %xmm0
-; SSSE3-NEXT: pcmpeqd %xmm2, %xmm0
-; SSSE3-NEXT: psrad $31, %xmm2
-; SSSE3-NEXT: pcmpeqd %xmm4, %xmm2
-; SSSE3-NEXT: pcmpeqd %xmm1, %xmm1
-; SSSE3-NEXT: pxor %xmm1, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; SSSE3-NEXT: pmuludq %xmm1, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSSE3-NEXT: pmuludq %xmm2, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSSE3-NEXT: movdqa %xmm0, %xmm1
+; SSSE3-NEXT: pslld $31, %xmm1
+; SSSE3-NEXT: movmskps %xmm1, %eax
+; SSSE3-NEXT: psrad $31, %xmm1
+; SSSE3-NEXT: pcmpeqd %xmm0, %xmm1
+; SSSE3-NEXT: pcmpeqd %xmm0, %xmm0
; SSSE3-NEXT: pxor %xmm1, %xmm0
-; SSSE3-NEXT: por %xmm2, %xmm0
; SSSE3-NEXT: movb %al, (%rdi)
; SSSE3-NEXT: retq
;
; SSE41-LABEL: smulo_v4i1:
; SSE41: # %bb.0:
+; SSE41-NEXT: pslld $31, %xmm1
+; SSE41-NEXT: psrad $31, %xmm1
; SSE41-NEXT: pslld $31, %xmm0
; SSE41-NEXT: psrad $31, %xmm0
+; SSE41-NEXT: pmulld %xmm1, %xmm0
+; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: pslld $31, %xmm1
+; SSE41-NEXT: movmskps %xmm1, %eax
; SSE41-NEXT: psrad $31, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; SSE41-NEXT: pmuldq %xmm2, %xmm3
-; SSE41-NEXT: movdqa %xmm0, %xmm2
-; SSE41-NEXT: pmuldq %xmm1, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7]
-; SSE41-NEXT: pmulld %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm3
-; SSE41-NEXT: pslld $31, %xmm3
-; SSE41-NEXT: movmskps %xmm3, %eax
-; SSE41-NEXT: psrad $31, %xmm3
-; SSE41-NEXT: pcmpeqd %xmm1, %xmm3
-; SSE41-NEXT: psrad $31, %xmm1
-; SSE41-NEXT: pcmpeqd %xmm2, %xmm1
+; SSE41-NEXT: pcmpeqd %xmm0, %xmm1
; SSE41-NEXT: pcmpeqd %xmm0, %xmm0
; SSE41-NEXT: pxor %xmm0, %xmm1
-; SSE41-NEXT: pxor %xmm3, %xmm0
-; SSE41-NEXT: por %xmm1, %xmm0
; SSE41-NEXT: movb %al, (%rdi)
+; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: smulo_v4i1:
; AVX1: # %bb.0:
-; AVX1-NEXT: vpslld $31, %xmm0, %xmm0
-; AVX1-NEXT: vpsrad $31, %xmm0, %xmm0
; AVX1-NEXT: vpslld $31, %xmm1, %xmm1
; AVX1-NEXT: vpsrad $31, %xmm1, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; AVX1-NEXT: vpmuldq %xmm2, %xmm3, %xmm2
-; AVX1-NEXT: vpmuldq %xmm1, %xmm0, %xmm3
-; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7]
+; AVX1-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX1-NEXT: vpsrad $31, %xmm0, %xmm0
; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrad $31, %xmm0, %xmm1
-; AVX1-NEXT: vpcmpeqd %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vpslld $31, %xmm0, %xmm1
+; AVX1-NEXT: vpsrad $31, %xmm1, %xmm2
+; AVX1-NEXT: vpcmpeqd %xmm0, %xmm2, %xmm0
; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpslld $31, %xmm0, %xmm3
-; AVX1-NEXT: vpsrad $31, %xmm3, %xmm4
-; AVX1-NEXT: vpcmpeqd %xmm0, %xmm4, %xmm0
; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovmskps %xmm3, %eax
+; AVX1-NEXT: vmovmskps %xmm1, %eax
; AVX1-NEXT: movb %al, (%rdi)
; AVX1-NEXT: retq
;
; AVX2-LABEL: smulo_v4i1:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpslld $31, %xmm0, %xmm0
-; AVX2-NEXT: vpsrad $31, %xmm0, %xmm0
; AVX2-NEXT: vpslld $31, %xmm1, %xmm1
; AVX2-NEXT: vpsrad $31, %xmm1, %xmm1
-; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; AVX2-NEXT: vpmuldq %xmm2, %xmm3, %xmm2
-; AVX2-NEXT: vpmuldq %xmm1, %xmm0, %xmm3
-; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
-; AVX2-NEXT: vpblendd {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2],xmm2[3]
+; AVX2-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX2-NEXT: vpsrad $31, %xmm0, %xmm0
; AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpsrad $31, %xmm0, %xmm1
-; AVX2-NEXT: vpcmpeqd %xmm1, %xmm2, %xmm1
+; AVX2-NEXT: vpslld $31, %xmm0, %xmm1
+; AVX2-NEXT: vpsrad $31, %xmm1, %xmm2
+; AVX2-NEXT: vpcmpeqd %xmm0, %xmm2, %xmm0
; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm1
-; AVX2-NEXT: vpslld $31, %xmm0, %xmm3
-; AVX2-NEXT: vpsrad $31, %xmm3, %xmm4
-; AVX2-NEXT: vpcmpeqd %xmm0, %xmm4, %xmm0
; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm0
-; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vmovmskps %xmm3, %eax
+; AVX2-NEXT: vmovmskps %xmm1, %eax
; AVX2-NEXT: movb %al, (%rdi)
; AVX2-NEXT: retq
;
; AVX512-LABEL: smulo_v4i1:
; AVX512: # %bb.0:
-; AVX512-NEXT: pushq %rbp
; AVX512-NEXT: pushq %rbx
-; AVX512-NEXT: vpslld $31, %xmm1, %xmm1
-; AVX512-NEXT: vptestmd %xmm1, %xmm1, %k0
-; AVX512-NEXT: kshiftrw $3, %k0, %k1
-; AVX512-NEXT: kmovd %k1, %r10d
-; AVX512-NEXT: andb $1, %r10b
-; AVX512-NEXT: negb %r10b
; AVX512-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX512-NEXT: vptestmd %xmm0, %xmm0, %k0
+; AVX512-NEXT: kshiftrw $3, %k0, %k1
+; AVX512-NEXT: kmovd %k1, %r8d
+; AVX512-NEXT: andb $1, %r8b
+; AVX512-NEXT: negb %r8b
+; AVX512-NEXT: vpslld $31, %xmm1, %xmm0
; AVX512-NEXT: vptestmd %xmm0, %xmm0, %k1
; AVX512-NEXT: kshiftrw $3, %k1, %k2
; AVX512-NEXT: kmovd %k2, %r9d
; AVX512-NEXT: andb $1, %r9b
; AVX512-NEXT: negb %r9b
-; AVX512-NEXT: kshiftrw $2, %k1, %k2
-; AVX512-NEXT: kmovd %k2, %ebp
-; AVX512-NEXT: andb $1, %bpl
-; AVX512-NEXT: negb %bpl
; AVX512-NEXT: kshiftrw $2, %k0, %k2
-; AVX512-NEXT: kmovd %k2, %edx
-; AVX512-NEXT: andb $1, %dl
-; AVX512-NEXT: negb %dl
+; AVX512-NEXT: kmovd %k2, %r10d
+; AVX512-NEXT: andb $1, %r10b
+; AVX512-NEXT: negb %r10b
+; AVX512-NEXT: kshiftrw $2, %k1, %k2
+; AVX512-NEXT: kmovd %k2, %ebx
+; AVX512-NEXT: andb $1, %bl
+; AVX512-NEXT: negb %bl
; AVX512-NEXT: kshiftrw $1, %k0, %k2
-; AVX512-NEXT: kmovd %k2, %esi
-; AVX512-NEXT: andb $1, %sil
-; AVX512-NEXT: negb %sil
-; AVX512-NEXT: kshiftrw $1, %k1, %k2
; AVX512-NEXT: kmovd %k2, %ecx
; AVX512-NEXT: andb $1, %cl
; AVX512-NEXT: negb %cl
-; AVX512-NEXT: kmovd %k1, %eax
+; AVX512-NEXT: kshiftrw $1, %k1, %k2
+; AVX512-NEXT: kmovd %k2, %esi
+; AVX512-NEXT: andb $1, %sil
+; AVX512-NEXT: negb %sil
+; AVX512-NEXT: kmovd %k0, %eax
; AVX512-NEXT: andb $1, %al
; AVX512-NEXT: negb %al
-; AVX512-NEXT: kmovd %k0, %ebx
-; AVX512-NEXT: andb $1, %bl
-; AVX512-NEXT: negb %bl
+; AVX512-NEXT: kmovd %k1, %edx
+; AVX512-NEXT: andb $1, %dl
+; AVX512-NEXT: negb %dl
; AVX512-NEXT: # kill: def $al killed $al killed $eax
-; AVX512-NEXT: imulb %bl
+; AVX512-NEXT: mulb %dl
; AVX512-NEXT: movl %eax, %r11d
-; AVX512-NEXT: seto %al
-; AVX512-NEXT: movl %r11d, %ebx
-; AVX512-NEXT: andb $1, %bl
-; AVX512-NEXT: negb %bl
-; AVX512-NEXT: cmpb %r11b, %bl
-; AVX512-NEXT: setne %bl
-; AVX512-NEXT: orb %al, %bl
+; AVX512-NEXT: andb $1, %al
+; AVX512-NEXT: negb %al
+; AVX512-NEXT: cmpb %r11b, %al
; AVX512-NEXT: setne %al
; AVX512-NEXT: kmovd %eax, %k1
; AVX512-NEXT: movw $-3, %ax
; AVX512-NEXT: kmovd %eax, %k0
; AVX512-NEXT: kandw %k0, %k1, %k1
; AVX512-NEXT: movl %ecx, %eax
-; AVX512-NEXT: imulb %sil
-; AVX512-NEXT: movl %eax, %r8d
-; AVX512-NEXT: seto %al
-; AVX512-NEXT: movl %r8d, %ecx
-; AVX512-NEXT: andb $1, %cl
-; AVX512-NEXT: negb %cl
-; AVX512-NEXT: cmpb %r8b, %cl
-; AVX512-NEXT: setne %cl
-; AVX512-NEXT: orb %al, %cl
+; AVX512-NEXT: mulb %sil
+; AVX512-NEXT: movl %eax, %ecx
+; AVX512-NEXT: andb $1, %al
+; AVX512-NEXT: negb %al
+; AVX512-NEXT: cmpb %cl, %al
; AVX512-NEXT: setne %al
; AVX512-NEXT: kmovd %eax, %k2
; AVX512-NEXT: kshiftlw $15, %k2, %k2
@@ -3596,34 +3527,27 @@ define <4 x i32> @smulo_v4i1(<4 x i1> %a0, <4 x i1> %a1, <4 x i1>* %p2) nounwind
; AVX512-NEXT: movw $-5, %ax
; AVX512-NEXT: kmovd %eax, %k1
; AVX512-NEXT: kandw %k1, %k2, %k2
-; AVX512-NEXT: movl %ebp, %eax
-; AVX512-NEXT: imulb %dl
-; AVX512-NEXT: movl %eax, %esi
-; AVX512-NEXT: seto %al
-; AVX512-NEXT: movl %esi, %ecx
-; AVX512-NEXT: andb $1, %cl
-; AVX512-NEXT: negb %cl
-; AVX512-NEXT: cmpb %sil, %cl
-; AVX512-NEXT: setne %cl
-; AVX512-NEXT: orb %al, %cl
+; AVX512-NEXT: movl %r10d, %eax
+; AVX512-NEXT: mulb %bl
+; AVX512-NEXT: movl %eax, %edx
+; AVX512-NEXT: andb $1, %al
+; AVX512-NEXT: negb %al
+; AVX512-NEXT: cmpb %dl, %al
; AVX512-NEXT: setne %al
; AVX512-NEXT: kmovd %eax, %k3
; AVX512-NEXT: kshiftlw $2, %k3, %k3
; AVX512-NEXT: korw %k3, %k2, %k2
; AVX512-NEXT: kshiftlw $13, %k2, %k2
; AVX512-NEXT: kshiftrw $13, %k2, %k2
-; AVX512-NEXT: movl %r9d, %eax
-; AVX512-NEXT: imulb %r10b
+; AVX512-NEXT: movl %r8d, %eax
+; AVX512-NEXT: mulb %r9b
; AVX512-NEXT: # kill: def $al killed $al def $eax
-; AVX512-NEXT: seto %cl
-; AVX512-NEXT: movl %eax, %edx
-; AVX512-NEXT: andb $1, %dl
-; AVX512-NEXT: negb %dl
-; AVX512-NEXT: cmpb %al, %dl
-; AVX512-NEXT: setne %dl
-; AVX512-NEXT: orb %cl, %dl
-; AVX512-NEXT: setne %cl
-; AVX512-NEXT: kmovd %ecx, %k3
+; AVX512-NEXT: movl %eax, %ebx
+; AVX512-NEXT: andb $1, %bl
+; AVX512-NEXT: negb %bl
+; AVX512-NEXT: cmpb %al, %bl
+; AVX512-NEXT: setne %sil
+; AVX512-NEXT: kmovd %esi, %k3
; AVX512-NEXT: kshiftlw $3, %k3, %k3
; AVX512-NEXT: korw %k3, %k2, %k2
; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
@@ -3631,12 +3555,12 @@ define <4 x i32> @smulo_v4i1(<4 x i1> %a0, <4 x i1> %a1, <4 x i1>* %p2) nounwind
; AVX512-NEXT: andl $1, %r11d
; AVX512-NEXT: kmovw %r11d, %k2
; AVX512-NEXT: kandw %k0, %k2, %k0
-; AVX512-NEXT: kmovd %r8d, %k2
+; AVX512-NEXT: kmovd %ecx, %k2
; AVX512-NEXT: kshiftlw $15, %k2, %k2
; AVX512-NEXT: kshiftrw $14, %k2, %k2
; AVX512-NEXT: korw %k2, %k0, %k0
; AVX512-NEXT: kandw %k1, %k0, %k0
-; AVX512-NEXT: kmovd %esi, %k1
+; AVX512-NEXT: kmovd %edx, %k1
; AVX512-NEXT: kshiftlw $15, %k1, %k1
; AVX512-NEXT: kshiftrw $13, %k1, %k1
; AVX512-NEXT: korw %k1, %k0, %k0
@@ -3650,7 +3574,6 @@ define <4 x i32> @smulo_v4i1(<4 x i1> %a0, <4 x i1> %a1, <4 x i1>* %p2) nounwind
; AVX512-NEXT: kmovd %k0, %eax
; AVX512-NEXT: movb %al, (%rdi)
; AVX512-NEXT: popq %rbx
-; AVX512-NEXT: popq %rbp
; AVX512-NEXT: retq
%t = call {<4 x i1>, <4 x i1>} @llvm.smul.with.overflow.v4i1(<4 x i1> %a0, <4 x i1> %a1)
%val = extractvalue {<4 x i1>, <4 x i1>} %t, 0
diff --git a/llvm/test/CodeGen/X86/vec_umulo.ll b/llvm/test/CodeGen/X86/vec_umulo.ll
index ef028899836e..5e0e23c1509f 100644
--- a/llvm/test/CodeGen/X86/vec_umulo.ll
+++ b/llvm/test/CodeGen/X86/vec_umulo.ll
@@ -3014,112 +3014,35 @@ define <4 x i32> @umulo_v4i24(<4 x i24> %a0, <4 x i24> %a1, <4 x i24>* %p2) noun
}
define <4 x i32> @umulo_v4i1(<4 x i1> %a0, <4 x i1> %a1, <4 x i1>* %p2) nounwind {
-; SSE2-LABEL: umulo_v4i1:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [1,1,1,1]
-; SSE2-NEXT: pand %xmm2, %xmm1
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; SSE2-NEXT: pmuludq %xmm1, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE2-NEXT: pmuludq %xmm2, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
-; SSE2-NEXT: pxor %xmm2, %xmm2
-; SSE2-NEXT: pcmpeqd %xmm2, %xmm3
-; SSE2-NEXT: pcmpeqd %xmm4, %xmm4
-; SSE2-NEXT: pxor %xmm4, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm0[0,2,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
-; SSE2-NEXT: movdqa %xmm5, %xmm0
-; SSE2-NEXT: psrld $1, %xmm0
-; SSE2-NEXT: pcmpeqd %xmm2, %xmm0
-; SSE2-NEXT: pxor %xmm4, %xmm0
-; SSE2-NEXT: por %xmm3, %xmm0
-; SSE2-NEXT: pslld $31, %xmm5
-; SSE2-NEXT: movmskps %xmm5, %eax
-; SSE2-NEXT: movb %al, (%rdi)
-; SSE2-NEXT: retq
-;
-; SSSE3-LABEL: umulo_v4i1:
-; SSSE3: # %bb.0:
-; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [1,1,1,1]
-; SSSE3-NEXT: pand %xmm2, %xmm1
-; SSSE3-NEXT: pand %xmm2, %xmm0
-; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; SSSE3-NEXT: pmuludq %xmm1, %xmm0
-; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
-; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSSE3-NEXT: pmuludq %xmm2, %xmm1
-; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
-; SSSE3-NEXT: pxor %xmm2, %xmm2
-; SSSE3-NEXT: pcmpeqd %xmm2, %xmm3
-; SSSE3-NEXT: pcmpeqd %xmm4, %xmm4
-; SSSE3-NEXT: pxor %xmm4, %xmm3
-; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm0[0,2,2,3]
-; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
-; SSSE3-NEXT: movdqa %xmm5, %xmm0
-; SSSE3-NEXT: psrld $1, %xmm0
-; SSSE3-NEXT: pcmpeqd %xmm2, %xmm0
-; SSSE3-NEXT: pxor %xmm4, %xmm0
-; SSSE3-NEXT: por %xmm3, %xmm0
-; SSSE3-NEXT: pslld $31, %xmm5
-; SSSE3-NEXT: movmskps %xmm5, %eax
-; SSSE3-NEXT: movb %al, (%rdi)
-; SSSE3-NEXT: retq
-;
-; SSE41-LABEL: umulo_v4i1:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [1,1,1,1]
-; SSE41-NEXT: pand %xmm2, %xmm0
-; SSE41-NEXT: pand %xmm2, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; SSE41-NEXT: pmuludq %xmm2, %xmm3
-; SSE41-NEXT: movdqa %xmm0, %xmm2
-; SSE41-NEXT: pmuludq %xmm1, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,1,3,3]
-; SSE41-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm3[2,3],xmm4[4,5],xmm3[6,7]
-; SSE41-NEXT: pxor %xmm2, %xmm2
-; SSE41-NEXT: pcmpeqd %xmm2, %xmm4
-; SSE41-NEXT: pcmpeqd %xmm3, %xmm3
-; SSE41-NEXT: pxor %xmm3, %xmm4
-; SSE41-NEXT: pmaddwd %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: psrld $1, %xmm0
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm2
-; SSE41-NEXT: pxor %xmm3, %xmm2
-; SSE41-NEXT: por %xmm4, %xmm2
-; SSE41-NEXT: pslld $31, %xmm1
-; SSE41-NEXT: movmskps %xmm1, %eax
-; SSE41-NEXT: movb %al, (%rdi)
-; SSE41-NEXT: movdqa %xmm2, %xmm0
-; SSE41-NEXT: retq
+; SSE-LABEL: umulo_v4i1:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [1,1,1,1]
+; SSE-NEXT: pand %xmm2, %xmm1
+; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: pmaddwd %xmm1, %xmm0
+; SSE-NEXT: movdqa %xmm0, %xmm1
+; SSE-NEXT: psrld $1, %xmm1
+; SSE-NEXT: pxor %xmm2, %xmm2
+; SSE-NEXT: pcmpeqd %xmm2, %xmm1
+; SSE-NEXT: pcmpeqd %xmm2, %xmm2
+; SSE-NEXT: pxor %xmm2, %xmm1
+; SSE-NEXT: pslld $31, %xmm0
+; SSE-NEXT: movmskps %xmm0, %eax
+; SSE-NEXT: movb %al, (%rdi)
+; SSE-NEXT: movdqa %xmm1, %xmm0
+; SSE-NEXT: retq
;
; AVX1-LABEL: umulo_v4i1:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [1,1,1,1]
-; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; AVX1-NEXT: vpmuludq %xmm2, %xmm3, %xmm2
-; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm3
-; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7]
-; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; AVX1-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
-; AVX1-NEXT: vpxor %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpmaddwd %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vpsrld $1, %xmm1, %xmm0
-; AVX1-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vpxor %xmm4, %xmm0, %xmm0
-; AVX1-NEXT: vpor %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqd %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpslld $31, %xmm1, %xmm1
; AVX1-NEXT: vmovmskps %xmm1, %eax
; AVX1-NEXT: movb %al, (%rdi)
@@ -3128,23 +3051,14 @@ define <4 x i32> @umulo_v4i1(<4 x i1> %a0, <4 x i1> %a1, <4 x i1>* %p2) nounwind
; AVX2-LABEL: umulo_v4i1:
; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [1,1,1,1]
-; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpand %xmm2, %xmm1, %xmm1
-; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; AVX2-NEXT: vpmuludq %xmm2, %xmm3, %xmm2
-; AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm3
-; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
-; AVX2-NEXT: vpblendd {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2],xmm2[3]
-; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; AVX2-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2
-; AVX2-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
-; AVX2-NEXT: vpxor %xmm4, %xmm2, %xmm2
+; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpmaddwd %xmm1, %xmm0, %xmm1
; AVX2-NEXT: vpsrld $1, %xmm1, %xmm0
-; AVX2-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0
-; AVX2-NEXT: vpxor %xmm4, %xmm0, %xmm0
-; AVX2-NEXT: vpor %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX2-NEXT: vpcmpeqd %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpslld $31, %xmm1, %xmm1
; AVX2-NEXT: vmovmskps %xmm1, %eax
; AVX2-NEXT: movb %al, (%rdi)
@@ -3152,7 +3066,6 @@ define <4 x i32> @umulo_v4i1(<4 x i1> %a0, <4 x i1> %a1, <4 x i1>* %p2) nounwind
;
; AVX512-LABEL: umulo_v4i1:
; AVX512: # %bb.0:
-; AVX512-NEXT: pushq %rbp
; AVX512-NEXT: pushq %rbx
; AVX512-NEXT: vpslld $31, %xmm0, %xmm0
; AVX512-NEXT: vptestmd %xmm0, %xmm0, %k0
@@ -3174,31 +3087,25 @@ define <4 x i32> @umulo_v4i1(<4 x i1> %a0, <4 x i1> %a1, <4 x i1>* %p2) nounwind
; AVX512-NEXT: kmovd %k2, %ecx
; AVX512-NEXT: andb $1, %cl
; AVX512-NEXT: kshiftrw $1, %k1, %k2
-; AVX512-NEXT: kmovd %k2, %esi
-; AVX512-NEXT: andb $1, %sil
+; AVX512-NEXT: kmovd %k2, %edx
+; AVX512-NEXT: andb $1, %dl
; AVX512-NEXT: kmovd %k0, %eax
; AVX512-NEXT: andb $1, %al
-; AVX512-NEXT: kmovd %k1, %edx
-; AVX512-NEXT: andb $1, %dl
+; AVX512-NEXT: kmovd %k1, %esi
+; AVX512-NEXT: andb $1, %sil
+; AVX512-NEXT: movw $-3, %bx
+; AVX512-NEXT: kmovd %ebx, %k0
; AVX512-NEXT: # kill: def $al killed $al killed $eax
-; AVX512-NEXT: mulb %dl
-; AVX512-NEXT: movl %eax, %edx
-; AVX512-NEXT: seto %al
-; AVX512-NEXT: testb $-2, %dl
-; AVX512-NEXT: setne %bl
-; AVX512-NEXT: orb %al, %bl
+; AVX512-NEXT: mulb %sil
+; AVX512-NEXT: movl %eax, %esi
+; AVX512-NEXT: testb $2, %al
; AVX512-NEXT: setne %al
; AVX512-NEXT: kmovd %eax, %k1
-; AVX512-NEXT: movw $-3, %ax
-; AVX512-NEXT: kmovd %eax, %k0
; AVX512-NEXT: kandw %k0, %k1, %k1
; AVX512-NEXT: movl %ecx, %eax
-; AVX512-NEXT: mulb %sil
-; AVX512-NEXT: movl %eax, %ebp
-; AVX512-NEXT: seto %al
-; AVX512-NEXT: testb $-2, %bpl
-; AVX512-NEXT: setne %bl
-; AVX512-NEXT: orb %al, %bl
+; AVX512-NEXT: mulb %dl
+; AVX512-NEXT: movl %eax, %ecx
+; AVX512-NEXT: testb $2, %al
; AVX512-NEXT: setne %al
; AVX512-NEXT: kmovd %eax, %k2
; AVX512-NEXT: kshiftlw $15, %k2, %k2
@@ -3209,11 +3116,8 @@ define <4 x i32> @umulo_v4i1(<4 x i1> %a0, <4 x i1> %a1, <4 x i1>* %p2) nounwind
; AVX512-NEXT: kandw %k1, %k2, %k2
; AVX512-NEXT: movl %r10d, %eax
; AVX512-NEXT: mulb %r11b
-; AVX512-NEXT: movl %eax, %esi
-; AVX512-NEXT: seto %al
-; AVX512-NEXT: testb $-2, %sil
-; AVX512-NEXT: setne %bl
-; AVX512-NEXT: orb %al, %bl
+; AVX512-NEXT: movl %eax, %edx
+; AVX512-NEXT: testb $2, %al
; AVX512-NEXT: setne %al
; AVX512-NEXT: kmovd %eax, %k3
; AVX512-NEXT: kshiftlw $2, %k3, %k3
@@ -3223,25 +3127,22 @@ define <4 x i32> @umulo_v4i1(<4 x i1> %a0, <4 x i1> %a1, <4 x i1>* %p2) nounwind
; AVX512-NEXT: movl %r8d, %eax
; AVX512-NEXT: mulb %r9b
; AVX512-NEXT: # kill: def $al killed $al def $eax
-; AVX512-NEXT: seto %bl
-; AVX512-NEXT: testb $-2, %al
-; AVX512-NEXT: setne %cl
-; AVX512-NEXT: orb %bl, %cl
-; AVX512-NEXT: setne %cl
-; AVX512-NEXT: kmovd %ecx, %k3
+; AVX512-NEXT: testb $2, %al
+; AVX512-NEXT: setne %bl
+; AVX512-NEXT: kmovd %ebx, %k3
; AVX512-NEXT: kshiftlw $3, %k3, %k3
; AVX512-NEXT: korw %k3, %k2, %k2
; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa32 %xmm0, %xmm0 {%k2} {z}
-; AVX512-NEXT: andl $1, %edx
-; AVX512-NEXT: kmovw %edx, %k2
+; AVX512-NEXT: andl $1, %esi
+; AVX512-NEXT: kmovw %esi, %k2
; AVX512-NEXT: kandw %k0, %k2, %k0
-; AVX512-NEXT: kmovd %ebp, %k2
+; AVX512-NEXT: kmovd %ecx, %k2
; AVX512-NEXT: kshiftlw $15, %k2, %k2
; AVX512-NEXT: kshiftrw $14, %k2, %k2
; AVX512-NEXT: korw %k2, %k0, %k0
; AVX512-NEXT: kandw %k1, %k0, %k0
-; AVX512-NEXT: kmovd %esi, %k1
+; AVX512-NEXT: kmovd %edx, %k1
; AVX512-NEXT: kshiftlw $15, %k1, %k1
; AVX512-NEXT: kshiftrw $13, %k1, %k1
; AVX512-NEXT: korw %k1, %k0, %k0
@@ -3255,7 +3156,6 @@ define <4 x i32> @umulo_v4i1(<4 x i1> %a0, <4 x i1> %a1, <4 x i1>* %p2) nounwind
; AVX512-NEXT: kmovd %k0, %eax
; AVX512-NEXT: movb %al, (%rdi)
; AVX512-NEXT: popq %rbx
-; AVX512-NEXT: popq %rbp
; AVX512-NEXT: retq
%t = call {<4 x i1>, <4 x i1>} @llvm.umul.with.overflow.v4i1(<4 x i1> %a0, <4 x i1> %a1)
%val = extractvalue {<4 x i1>, <4 x i1>} %t, 0