[llvm] 7425bdb - [X86] Add test cases for 'abs from mul patterns' (PR45691)

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Sat May 9 06:53:48 PDT 2020


Author: Simon Pilgrim
Date: 2020-05-09T14:53:25+01:00
New Revision: 7425bdbd2faa9483be71dc5674264540d3cb4b84

URL: https://github.com/llvm/llvm-project/commit/7425bdbd2faa9483be71dc5674264540d3cb4b84
DIFF: https://github.com/llvm/llvm-project/commit/7425bdbd2faa9483be71dc5674264540d3cb4b84.diff

LOG: [X86] Add test cases for 'abs from mul patterns' (PR45691)
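For context, the pattern exercised by the new tests relies on the identity that
(x >> (bitwidth-1)) | 1 evaluates to +1 for non-negative x and -1 for negative x,
so multiplying x by it yields |x| (INT_MIN wraps back to itself in both forms).
A minimal scalar sketch of the two equivalent shapes, with illustrative function
names that are not part of the commit:

    define i32 @mul_to_abs_scalar(i32 %x) {
      %s = ashr i32 %x, 31   ; 0 when x >= 0, -1 when x < 0
      %o = or i32 %s, 1      ; +1 when x >= 0, -1 when x < 0
      %m = mul i32 %o, %x    ; x * sign(x) == |x|
      ret i32 %m
    }

    ; ...which is equivalent to the usual select-based abs expansion:
    define i32 @abs_scalar(i32 %x) {
      %neg = sub i32 0, %x
      %cmp = icmp slt i32 %x, 0
      %abs = select i1 %cmp, i32 %neg, i32 %x
      ret i32 %abs
    }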

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/combine-mul.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/combine-mul.ll b/llvm/test/CodeGen/X86/combine-mul.ll
index 98fbd37f0d64..3282d5085592 100644
--- a/llvm/test/CodeGen/X86/combine-mul.ll
+++ b/llvm/test/CodeGen/X86/combine-mul.ll
@@ -354,6 +354,85 @@ define <4 x i32> @combine_mul_abs_v4i32(<4 x i32> %0) {
   ret <4 x i32> %m
 }
 
+; TODO fold Y = sra (X, size(X)-1); mul (or (Y, 1), X) -> (abs X)
+
+define <16 x i8> @combine_mul_to_abs_v16i8(<16 x i8> %x) {
+; SSE-LABEL: combine_mul_to_abs_v16i8:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pxor %xmm2, %xmm2
+; SSE-NEXT:    pcmpgtb %xmm0, %xmm2
+; SSE-NEXT:    por {{.*}}(%rip), %xmm2
+; SSE-NEXT:    pmovzxbw {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; SSE-NEXT:    punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE-NEXT:    pmovzxbw {{.*#+}} xmm1 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; SSE-NEXT:    punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; SSE-NEXT:    pmullw %xmm0, %xmm2
+; SSE-NEXT:    movdqa {{.*#+}} xmm0 = [255,255,255,255,255,255,255,255]
+; SSE-NEXT:    pand %xmm0, %xmm2
+; SSE-NEXT:    pmullw %xmm3, %xmm1
+; SSE-NEXT:    pand %xmm0, %xmm1
+; SSE-NEXT:    packuswb %xmm2, %xmm1
+; SSE-NEXT:    movdqa %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_mul_to_abs_v16i8:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT:    vpcmpgtb %xmm0, %xmm1, %xmm1
+; AVX-NEXT:    vpor {{.*}}(%rip), %xmm1, %xmm1
+; AVX-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX-NEXT:    vpmullw %ymm0, %ymm1, %ymm0
+; AVX-NEXT:    vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX-NEXT:    vpackuswb %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vzeroupper
+; AVX-NEXT:    retq
+  %s = ashr <16 x i8> %x, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
+  %o = or <16 x i8> %s, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %m = mul <16 x i8> %o, %x
+  ret <16 x i8> %m
+}
+
+define <2 x i64> @combine_mul_to_abs_v2i64(<2 x i64> %x) {
+; SSE-LABEL: combine_mul_to_abs_v2i64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movdqa %xmm0, %xmm1
+; SSE-NEXT:    psrad $31, %xmm1
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE-NEXT:    por {{.*}}(%rip), %xmm1
+; SSE-NEXT:    movdqa %xmm0, %xmm2
+; SSE-NEXT:    psrlq $32, %xmm2
+; SSE-NEXT:    pmuludq %xmm1, %xmm2
+; SSE-NEXT:    movdqa %xmm1, %xmm3
+; SSE-NEXT:    psrlq $32, %xmm3
+; SSE-NEXT:    pmuludq %xmm0, %xmm3
+; SSE-NEXT:    paddq %xmm2, %xmm3
+; SSE-NEXT:    psllq $32, %xmm3
+; SSE-NEXT:    pmuludq %xmm1, %xmm0
+; SSE-NEXT:    paddq %xmm3, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_mul_to_abs_v2i64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm1
+; AVX-NEXT:    vpor {{.*}}(%rip), %xmm1, %xmm1
+; AVX-NEXT:    vpsrlq $32, %xmm0, %xmm2
+; AVX-NEXT:    vpmuludq %xmm1, %xmm2, %xmm2
+; AVX-NEXT:    vpsrlq $32, %xmm1, %xmm3
+; AVX-NEXT:    vpmuludq %xmm3, %xmm0, %xmm3
+; AVX-NEXT:    vpaddq %xmm2, %xmm3, %xmm2
+; AVX-NEXT:    vpsllq $32, %xmm2, %xmm2
+; AVX-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpaddq %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %s = ashr <2 x i64> %x, <i64 63, i64 63>
+  %o = or <2 x i64> %s, <i64 1, i64 1>
+  %m = mul <2 x i64> %x, %o
+  ret <2 x i64> %m
+}
+
 ; This would infinite loop because DAGCombiner wants to turn this into a shift,
 ; but x86 lowering wants to avoid non-uniform vector shift amounts.
 