[llvm] r336773 - [DAGCombiner] Add (urem X, -1) -> select(X == -1, 0, X) fold
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Wed Jul 11 02:14:37 PDT 2018
Author: rksimon
Date: Wed Jul 11 02:14:37 2018
New Revision: 336773
URL: http://llvm.org/viewvc/llvm-project?rev=336773&view=rev
Log:
[DAGCombiner] Add (urem X, -1) -> select(X == -1, 0, X) fold
Modified:
llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
llvm/trunk/test/CodeGen/X86/combine-urem.ll
Modified: llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp?rev=336773&r1=336772&r2=336773&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp Wed Jul 11 02:14:37 2018
@@ -3244,6 +3244,8 @@ SDValue DAGCombiner::visitREM(SDNode *N)
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N->getValueType(0);
+ EVT CCVT = getSetCCResultType(VT);
+
bool isSigned = (Opcode == ISD::SREM);
SDLoc DL(N);
@@ -3253,6 +3255,10 @@ SDValue DAGCombiner::visitREM(SDNode *N)
if (N0C && N1C)
if (SDValue Folded = DAG.FoldConstantArithmetic(Opcode, DL, VT, N0C, N1C))
return Folded;
+ // fold (urem X, -1) -> select(X == -1, 0, x)
+ if (!isSigned && N1C && N1C->getAPIntValue().isAllOnesValue())
+ return DAG.getSelect(DL, VT, DAG.getSetCC(DL, CCVT, N0, N1, ISD::SETEQ),
+ DAG.getConstant(0, DL, VT), N0);
if (SDValue V = simplifyDivRem(N, DAG))
return V;
Modified: llvm/trunk/test/CodeGen/X86/combine-urem.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/combine-urem.ll?rev=336773&r1=336772&r2=336773&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/combine-urem.ll (original)
+++ llvm/trunk/test/CodeGen/X86/combine-urem.ll Wed Jul 11 02:14:37 2018
@@ -27,17 +27,13 @@ define <4 x i32> @combine_vec_urem_by_on
ret <4 x i32> %1
}
-; TODO fold (urem x, -1) -> select((icmp eq x, -1), 0, x)
+; fold (urem x, -1) -> select((icmp eq x, -1), 0, x)
define i32 @combine_urem_by_negone(i32 %x) {
; CHECK-LABEL: combine_urem_by_negone:
; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %eax
-; CHECK-NEXT: movq %rax, %rcx
-; CHECK-NEXT: shlq $31, %rcx
-; CHECK-NEXT: addq %rax, %rcx
-; CHECK-NEXT: sarq $63, %rcx
-; CHECK-NEXT: subl %ecx, %edi
-; CHECK-NEXT: movl %edi, %eax
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: cmpl $-1, %edi
+; CHECK-NEXT: cmovnel %edi, %eax
; CHECK-NEXT: retq
%1 = urem i32 %x, -1
ret i32 %1
@@ -46,40 +42,18 @@ define i32 @combine_urem_by_negone(i32 %
define <4 x i32> @combine_vec_urem_by_negone(<4 x i32> %x) {
; SSE-LABEL: combine_vec_urem_by_negone:
; SSE: # %bb.0:
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = [2147483649,2147483649,2147483649,2147483649]
-; SSE-NEXT: pmuludq %xmm2, %xmm1
-; SSE-NEXT: pmuludq %xmm0, %xmm2
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
-; SSE-NEXT: psrad $31, %xmm2
-; SSE-NEXT: psubd %xmm2, %xmm0
+; SSE-NEXT: pcmpeqd %xmm1, %xmm1
+; SSE-NEXT: pcmpeqd %xmm0, %xmm1
+; SSE-NEXT: pandn %xmm0, %xmm1
+; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: retq
;
-; AVX1-LABEL: combine_vec_urem_by_negone:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [2147483649,2147483649,2147483649,2147483649]
-; AVX1-NEXT: vpmuludq %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpmuludq %xmm2, %xmm0, %xmm2
-; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
-; AVX1-NEXT: vpsrad $31, %xmm1, %xmm1
-; AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: combine_vec_urem_by_negone:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [2147483649,2147483649,2147483649,2147483649]
-; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; AVX2-NEXT: vpmuludq %xmm2, %xmm3, %xmm2
-; AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm1
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
-; AVX2-NEXT: vpsrad $31, %xmm1, %xmm1
-; AVX2-NEXT: vpsubd %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: retq
+; AVX-LABEL: combine_vec_urem_by_negone:
+; AVX: # %bb.0:
+; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm1
+; AVX-NEXT: vpandn %xmm0, %xmm1, %xmm0
+; AVX-NEXT: retq
%1 = urem <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
ret <4 x i32> %1
}
More information about the llvm-commits
mailing list