[llvm] r338833 - [X86] Add example of 'zero shift' guards on rotation patterns (PR34924)

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Fri Aug 3 02:20:02 PDT 2018


Author: rksimon
Date: Fri Aug  3 02:20:02 2018
New Revision: 338833

URL: http://llvm.org/viewvc/llvm-project?rev=338833&view=rev
Log:
[X86] Add example of 'zero shift' guards on rotation patterns (PR34924)

Basic test pattern where codegen leaves an unnecessary select guarding the rotation-by-zero case. This variant is trivial; the more general case, which uses a compare+branch to prevent execution of an undefined shift, is trickier.
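
For reference, here is a sketch (not part of the commit) of the C-level rotate idioms involved. The first masks the shift amounts, so the c == 0 check is already redundant and survives only as the select/cmov seen in the new tests; the second is the trickier compare+branch form the log mentions, which guards an unmasked (otherwise undefined) shift. Function names are illustrative only.

#include <stdint.h>

/* Masked rotate with a redundant zero-shift guard: rot(x, 0) == x already,
   so the select should be foldable once the rotate is recognized. */
uint32_t rotl32_select(uint32_t x, uint32_t c) {
  uint32_t rot = (x << (c & 31)) | (x >> ((0u - c) & 31));
  return c == 0 ? x : rot;
}

/* Unmasked rotate guarded by a branch to avoid the undefined shift by 32;
   folding this away requires reasoning across basic blocks. */
uint32_t rotl32_branch(uint32_t x, uint32_t c) {
  if (c == 0)
    return x;
  return (x << c) | (x >> (32u - c));
}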

Modified:
    llvm/trunk/test/CodeGen/X86/combine-rotates.ll

Modified: llvm/trunk/test/CodeGen/X86/combine-rotates.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/combine-rotates.ll?rev=338833&r1=338832&r2=338833&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/combine-rotates.ll (original)
+++ llvm/trunk/test/CodeGen/X86/combine-rotates.ll Fri Aug  3 02:20:02 2018
@@ -94,6 +94,96 @@ define <4 x i32> @combine_vec_rot_rot_sp
   ret <4 x i32> %6
 }
 
+; TODO - fold (select (icmp eq c, 0), x, (rot x, c)) -> rot x, c
+define i32 @combine_rot_select_zero(i32, i32) {
+; CHECK-LABEL: combine_rot_select_zero:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %edi, %eax
+; CHECK-NEXT:    movl %esi, %ecx
+; CHECK-NEXT:    roll %cl, %eax
+; CHECK-NEXT:    testl %esi, %esi
+; CHECK-NEXT:    cmovel %edi, %eax
+; CHECK-NEXT:    retq
+  %3 = and i32 %1, 31
+  %4 = shl i32 %0, %3
+  %5 = sub i32 0, %1
+  %6 = and i32 %5, 31
+  %7 = lshr i32 %0, %6
+  %8 = or i32 %4, %7
+  %9 = icmp eq i32 %1, 0
+  %10 = select i1 %9, i32 %0, i32 %8
+  ret i32 %10
+}
+
+define <4 x i32> @combine_vec_rot_select_zero(<4 x i32>, <4 x i32>) {
+; SSE2-LABEL: combine_vec_rot_select_zero:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [31,31,31,31]
+; SSE2-NEXT:    pand %xmm1, %xmm2
+; SSE2-NEXT:    pxor %xmm3, %xmm3
+; SSE2-NEXT:    pslld $23, %xmm2
+; SSE2-NEXT:    paddd {{.*}}(%rip), %xmm2
+; SSE2-NEXT:    cvttps2dq %xmm2, %xmm2
+; SSE2-NEXT:    movdqa %xmm0, %xmm4
+; SSE2-NEXT:    pmuludq %xmm2, %xmm4
+; SSE2-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[1,3,2,3]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm6, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm6 = xmm2[1,3,2,3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
+; SSE2-NEXT:    por %xmm5, %xmm4
+; SSE2-NEXT:    pcmpeqd %xmm1, %xmm3
+; SSE2-NEXT:    pand %xmm3, %xmm0
+; SSE2-NEXT:    pandn %xmm4, %xmm3
+; SSE2-NEXT:    por %xmm3, %xmm0
+; SSE2-NEXT:    retq
+;
+; XOP-LABEL: combine_vec_rot_select_zero:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm2
+; XOP-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; XOP-NEXT:    vprotd %xmm2, %xmm0, %xmm2
+; XOP-NEXT:    vpcomeqd %xmm3, %xmm1, %xmm1
+; XOP-NEXT:    vblendvps %xmm1, %xmm0, %xmm2, %xmm0
+; XOP-NEXT:    retq
+;
+; AVX2-LABEL: combine_vec_rot_select_zero:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [31,31,31,31]
+; AVX2-NEXT:    vpand %xmm2, %xmm1, %xmm2
+; AVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX2-NEXT:    vpsllvd %xmm2, %xmm0, %xmm4
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm5 = [32,32,32,32]
+; AVX2-NEXT:    vpsubd %xmm2, %xmm5, %xmm2
+; AVX2-NEXT:    vpsrlvd %xmm2, %xmm0, %xmm2
+; AVX2-NEXT:    vpor %xmm2, %xmm4, %xmm2
+; AVX2-NEXT:    vpcmpeqd %xmm3, %xmm1, %xmm1
+; AVX2-NEXT:    vblendvps %xmm1, %xmm0, %xmm2, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: combine_vec_rot_select_zero:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpandd {{.*}}(%rip){1to4}, %xmm1, %xmm2
+; AVX512-NEXT:    vprolvd %xmm2, %xmm0, %xmm2
+; AVX512-NEXT:    vptestnmd %xmm1, %xmm1, %k1
+; AVX512-NEXT:    vmovdqa32 %xmm0, %xmm2 {%k1}
+; AVX512-NEXT:    vmovdqa %xmm2, %xmm0
+; AVX512-NEXT:    retq
+  %3 = and <4 x i32> %1, <i32 31, i32 31, i32 31, i32 31>
+  %4 = shl <4 x i32> %0, %3
+  %5 = sub <4 x i32> zeroinitializer, %1
+  %6 = and <4 x i32> %5, <i32 31, i32 31, i32 31, i32 31>
+  %7 = lshr <4 x i32> %0, %6
+  %8 = or <4 x i32> %4, %7
+  %9 = icmp eq <4 x i32> %1, zeroinitializer
+  %10 = select <4 x i1> %9, <4 x i32> %0, <4 x i32> %8
+  ret <4 x i32> %10
+}
+
 define <4 x i32> @rotate_demanded_bits(<4 x i32>, <4 x i32>) {
 ; SSE2-LABEL: rotate_demanded_bits:
 ; SSE2:       # %bb.0:
