[llvm] r369102 - [X86] Add test case for future MULFIX DAG combine folds. NFC

Bjorn Pettersson via llvm-commits llvm-commits at lists.llvm.org
Fri Aug 16 06:16:38 PDT 2019


Author: bjope
Date: Fri Aug 16 06:16:38 2019
New Revision: 369102

URL: http://llvm.org/viewvc/llvm-project?rev=369102&view=rev
Log:
[X86] Add test case for future MULFIX DAG combine folds. NFC

Add some test cases displaying the lack of DAG combine
folds for SMULFIX/UMULFIX/SMULFIXSAT when either
multiplicand is undef or zero.

It seems like widening vector legalization for X86 can
introduce fixed point multiplication of undef values.
So that is one way that such operations could appear
during ISel.

Multiplication by zero is probably less likely, and
could potentially be handled by InstCombine. But I do
not think it would hurt to do such folds in DAGCombiner.

This patch only adds the test case. The folds will be
added in a follow up patch.

Added:
    llvm/trunk/test/CodeGen/X86/mulfix_combine.ll

Added: llvm/trunk/test/CodeGen/X86/mulfix_combine.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/mulfix_combine.ll?rev=369102&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/mulfix_combine.ll (added)
+++ llvm/trunk/test/CodeGen/X86/mulfix_combine.ll Fri Aug 16 06:16:38 2019
@@ -0,0 +1,206 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-linux -o - | FileCheck %s
+
+declare i32 @llvm.smul.fix.i32(i32, i32, i32 immarg)
+declare i32 @llvm.umul.fix.i32(i32, i32, i32 immarg)
+declare i32 @llvm.smul.fix.sat.i32(i32, i32, i32 immarg)
+
+declare <4 x i32> @llvm.smul.fix.v4i32(<4 x i32>, <4 x i32>, i32 immarg)
+declare <4 x i32> @llvm.umul.fix.v4i32(<4 x i32>, <4 x i32>, i32 immarg)
+declare <4 x i32> @llvm.smul.fix.sat.v4i32(<4 x i32>, <4 x i32>, i32 immarg)
+
+define i32 @smulfix_undef(i32 %y) nounwind {
+; CHECK-LABEL: smulfix_undef:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:    shrdl $2, %eax, %eax
+; CHECK-NEXT:    retq
+  %tmp = call i32 @llvm.smul.fix.i32(i32 undef, i32 %y, i32 2)
+  ret i32 %tmp
+}
+
+define i32 @smulfix_zero(i32 %y) nounwind {
+; CHECK-LABEL: smulfix_zero:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:    shrdl $2, %eax, %eax
+; CHECK-NEXT:    retq
+  %tmp = call i32 @llvm.smul.fix.i32(i32 0, i32 %y, i32 2)
+  ret i32 %tmp
+}
+
+define i32 @umulfix_undef(i32 %y) nounwind {
+; CHECK-LABEL: umulfix_undef:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:    shrdl $2, %eax, %eax
+; CHECK-NEXT:    retq
+  %tmp = call i32 @llvm.umul.fix.i32(i32 undef, i32 %y, i32 2)
+  ret i32 %tmp
+}
+
+define i32 @umulfix_zero(i32 %y) nounwind {
+; CHECK-LABEL: umulfix_zero:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:    shrdl $2, %eax, %eax
+; CHECK-NEXT:    retq
+  %tmp = call i32 @llvm.umul.fix.i32(i32 0, i32 %y, i32 2)
+  ret i32 %tmp
+}
+
+define i32 @smulfixsat_undef(i32 %y) nounwind {
+; CHECK-LABEL: smulfixsat_undef:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:    shrdl $2, %eax, %eax
+; CHECK-NEXT:    movl $1, %ecx
+; CHECK-NEXT:    negl %ecx
+; CHECK-NEXT:    movl $2147483647, %ecx # imm = 0x7FFFFFFF
+; CHECK-NEXT:    cmovlel %eax, %ecx
+; CHECK-NEXT:    movl $-2, %eax
+; CHECK-NEXT:    negl %eax
+; CHECK-NEXT:    movl $-2147483648, %eax # imm = 0x80000000
+; CHECK-NEXT:    cmovgel %ecx, %eax
+; CHECK-NEXT:    retq
+  %tmp = call i32 @llvm.smul.fix.sat.i32(i32 undef, i32 %y, i32 2)
+  ret i32 %tmp
+}
+
+define i32 @smulfixsat_zero(i32 %y) nounwind {
+; CHECK-LABEL: smulfixsat_zero:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:    shrdl $2, %eax, %eax
+; CHECK-NEXT:    movl $1, %ecx
+; CHECK-NEXT:    negl %ecx
+; CHECK-NEXT:    movl $2147483647, %ecx # imm = 0x7FFFFFFF
+; CHECK-NEXT:    cmovlel %eax, %ecx
+; CHECK-NEXT:    movl $-2, %eax
+; CHECK-NEXT:    negl %eax
+; CHECK-NEXT:    movl $-2147483648, %eax # imm = 0x80000000
+; CHECK-NEXT:    cmovgel %ecx, %eax
+; CHECK-NEXT:    retq
+  %tmp = call i32 @llvm.smul.fix.sat.i32(i32 0, i32 %y, i32 2)
+  ret i32 %tmp
+}
+
+define <4 x i32> @vec_smulfix_undef(<4 x i32> %y) nounwind {
+; CHECK-LABEL: vec_smulfix_undef:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; CHECK-NEXT:    pxor %xmm2, %xmm2
+; CHECK-NEXT:    pcmpgtd %xmm0, %xmm2
+; CHECK-NEXT:    pand %xmm0, %xmm2
+; CHECK-NEXT:    pmuludq %xmm0, %xmm0
+; CHECK-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
+; CHECK-NEXT:    pmuludq %xmm0, %xmm1
+; CHECK-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
+; CHECK-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; CHECK-NEXT:    psubd %xmm2, %xmm0
+; CHECK-NEXT:    pslld $30, %xmm0
+; CHECK-NEXT:    retq
+  %tmp = call <4 x i32> @llvm.smul.fix.v4i32(<4 x i32> undef, <4 x i32> %y, i32 2)
+  ret <4 x i32> %tmp
+}
+
+define <4 x i32> @vec_smulfix_zero(<4 x i32> %y) nounwind {
+; CHECK-LABEL: vec_smulfix_zero:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    pxor %xmm1, %xmm1
+; CHECK-NEXT:    pxor %xmm2, %xmm2
+; CHECK-NEXT:    pmuludq %xmm0, %xmm2
+; CHECK-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,2,2,3]
+; CHECK-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; CHECK-NEXT:    pmuludq %xmm1, %xmm4
+; CHECK-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[0,2,2,3]
+; CHECK-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
+; CHECK-NEXT:    psrld $2, %xmm3
+; CHECK-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,3,2,3]
+; CHECK-NEXT:    pshufd {{.*#+}} xmm1 = xmm4[1,3,2,3]
+; CHECK-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; CHECK-NEXT:    pslld $30, %xmm0
+; CHECK-NEXT:    por %xmm3, %xmm0
+; CHECK-NEXT:    retq
+  %tmp = call <4 x i32> @llvm.smul.fix.v4i32(<4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> %y, i32 2)
+  ret <4 x i32> %tmp
+}
+
+define <4 x i32> @vec_umulfix_undef(<4 x i32> %y) nounwind {
+; CHECK-LABEL: vec_umulfix_undef:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; CHECK-NEXT:    pmuludq %xmm0, %xmm0
+; CHECK-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
+; CHECK-NEXT:    pmuludq %xmm0, %xmm1
+; CHECK-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
+; CHECK-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; CHECK-NEXT:    pslld $30, %xmm0
+; CHECK-NEXT:    retq
+  %tmp = call <4 x i32> @llvm.umul.fix.v4i32(<4 x i32> undef, <4 x i32> %y, i32 2)
+  ret <4 x i32> %tmp
+}
+
+define <4 x i32> @vec_umulfix_zero(<4 x i32> %y) nounwind {
+; CHECK-LABEL: vec_umulfix_zero:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    pxor %xmm1, %xmm1
+; CHECK-NEXT:    pxor %xmm2, %xmm2
+; CHECK-NEXT:    pmuludq %xmm0, %xmm2
+; CHECK-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,2,2,3]
+; CHECK-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; CHECK-NEXT:    pmuludq %xmm1, %xmm4
+; CHECK-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[0,2,2,3]
+; CHECK-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
+; CHECK-NEXT:    psrld $2, %xmm3
+; CHECK-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,3,2,3]
+; CHECK-NEXT:    pshufd {{.*#+}} xmm1 = xmm4[1,3,2,3]
+; CHECK-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; CHECK-NEXT:    pslld $30, %xmm0
+; CHECK-NEXT:    por %xmm3, %xmm0
+; CHECK-NEXT:    retq
+  %tmp = call <4 x i32> @llvm.umul.fix.v4i32(<4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> %y, i32 2)
+  ret <4 x i32> %tmp
+}
+
+define <4 x i32> @vec_smulfixsat_undef(<4 x i32> %y) nounwind {
+; CHECK-LABEL: vec_smulfixsat_undef:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:    shrdl $2, %eax, %eax
+; CHECK-NEXT:    movl $1, %ecx
+; CHECK-NEXT:    negl %ecx
+; CHECK-NEXT:    movl $2147483647, %ecx # imm = 0x7FFFFFFF
+; CHECK-NEXT:    cmovlel %eax, %ecx
+; CHECK-NEXT:    movl $-2, %eax
+; CHECK-NEXT:    negl %eax
+; CHECK-NEXT:    movl $-2147483648, %eax # imm = 0x80000000
+; CHECK-NEXT:    cmovgel %ecx, %eax
+; CHECK-NEXT:    movd %eax, %xmm0
+; CHECK-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; CHECK-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0,0]
+; CHECK-NEXT:    retq
+  %tmp = call <4 x i32> @llvm.smul.fix.sat.v4i32(<4 x i32> undef, <4 x i32> %y, i32 2)
+  ret <4 x i32> %tmp
+}
+
+define <4 x i32> @vec_smulfixsat_zero(<4 x i32> %y) nounwind {
+; CHECK-LABEL: vec_smulfixsat_zero:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:    shrdl $2, %eax, %eax
+; CHECK-NEXT:    movl $1, %ecx
+; CHECK-NEXT:    negl %ecx
+; CHECK-NEXT:    movl $2147483647, %ecx # imm = 0x7FFFFFFF
+; CHECK-NEXT:    cmovlel %eax, %ecx
+; CHECK-NEXT:    movl $-2, %eax
+; CHECK-NEXT:    negl %eax
+; CHECK-NEXT:    movl $-2147483648, %eax # imm = 0x80000000
+; CHECK-NEXT:    cmovgel %ecx, %eax
+; CHECK-NEXT:    movd %eax, %xmm0
+; CHECK-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; CHECK-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0,0]
+; CHECK-NEXT:    retq
+  %tmp = call <4 x i32> @llvm.smul.fix.sat.v4i32(<4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> %y, i32 2)
+  ret <4 x i32> %tmp
+}




More information about the llvm-commits mailing list