[llvm] 6c3bf36 - [X86] Invert transforming `(x * (Pow2_Ceil(C1) - (1 << C0))) & C1` -> `(-x << C0) & C1`

Noah Goldstein via llvm-commits llvm-commits at lists.llvm.org
Sat May 13 12:36:32 PDT 2023


Author: Noah Goldstein
Date: 2023-05-13T14:35:57-05:00
New Revision: 6c3bf364bf95209925b1e884077ec79cca274dc7

URL: https://github.com/llvm/llvm-project/commit/6c3bf364bf95209925b1e884077ec79cca274dc7
DIFF: https://github.com/llvm/llvm-project/commit/6c3bf364bf95209925b1e884077ec79cca274dc7.diff

LOG: [X86] Invert transforming `(x * (Pow2_Ceil(C1) - (1 << C0))) & C1` -> `(-x << C0) & C1`

We can detect when the `mul`+`and` is equivalent to the neg+shift form under
the following circumstances (a standalone sketch of the check follows the
list). Take `(Pow2_Ceil(C1) - (1 << C0))` as `C2`:
    1) `C2` is NOT a power of 2.
    2) `C2 + LeastSignificantBit(C2)` is a nonzero power of 2.
    3) `C2 u>= C1`
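
For the scalar case these checks reduce to a few bit tricks. Here is a minimal
standalone sketch (a hypothetical helper for illustration only, not the actual
DAG-combine code; the names are made up):

    #include <cstdint>
    #include <optional>

    static bool isPow2(uint64_t V) { return V != 0 && (V & (V - 1)) == 0; }

    // Given the multiply constant C2 and the AND mask C1, return C0 if C2
    // has the (Pow2_Ceil(C1) - (1 << C0)) shape, otherwise std::nullopt.
    std::optional<unsigned> matchNegShlMask(uint64_t C2, uint64_t C1) {
      uint64_t LowBit = C2 & (0 - C2); // isolate the least significant set bit
      if (C2 >= C1 && !isPow2(C2) && isPow2(C2 + LowBit))
        return __builtin_ctzll(LowBit); // C0 = log2(LowBit)
      return std::nullopt;             // pattern does not apply
    }

In the actual combine below, the constants come from `isConstOrConstSplat`
(splats only for now) and the rewrite is gated on the `mul` having a single
use.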

The motivation is that the middle end transforms:
    `(-x << C0) & C1`
to
    `(x * (Pow2_Ceil(C1) - (1 << C0))) & C1`

as it saves an IR instruction. On X86, however, the two instructions `sub` and
`shl` are better than the `mul`, so we want to undo the transform.
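
As a concrete example, take `C0 = 3` and `C1 = 56` (the constants used in the
tests below): `Pow2_Ceil(56) = 64`, so the multiply constant is `64 - 8 = 56`,
and `x * 56 = x*64 - x*8 == (-x) << 3 (mod 64)`; since the mask `56` only
keeps bits below 64, both forms agree after the `and`.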

This comes up when shifting a bit-mask by a byte misalignment, e.g.:
    `y << ((-(uintptr_t)x * 8) & 63)`
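
A slightly fuller version of that idiom as a hypothetical standalone function
(names and types here are illustrative, not taken from any real code):

    #include <cstdint>

    // Shift Mask left by the distance, in bits, from P up to the next 8-byte
    // boundary (modulo 64, so 0 when P is already aligned). The shift amount
    // ((0 - Addr) * 8) & 63 is the expression the middle end rewrites into
    // the multiply form above.
    uint64_t shiftByByteMisalignment(uint64_t Mask, const void *P) {
      uintptr_t Addr = reinterpret_cast<uintptr_t>(P);
      return Mask << (((0 - Addr) * 8) & 63);
    }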

Alive2 Proofs (including all cases with undefs in the vector):
https://alive2.llvm.org/ce/z/f-65b6
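
For a quick local sanity check of the scalar case (purely illustrative; the
Alive2 link above is the real proof and covers the vector/undef cases), the
equivalence for the test constants can be brute-forced:

    #include <cassert>
    #include <cstdint>

    int main() {
      // C0 = 3, C1 = 56, so C2 = Pow2_Ceil(56) - (1 << 3) = 56.
      // Only x mod 64 matters here, but sweep a larger range anyway.
      for (uint32_t X = 0; X < (1u << 20); ++X) {
        uint32_t MulForm = (X * 56u) & 56u;
        uint32_t NegForm = ((0u - X) << 3) & 56u;
        assert(MulForm == NegForm);
      }
      return 0;
    }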

Reviewed By: RKSimon, pengfei

Differential Revision: https://reviews.llvm.org/D150294

Added: 
    

Modified: 
    llvm/lib/Target/X86/X86ISelLowering.cpp
    llvm/test/CodeGen/X86/undo-mul-and.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 013f444ad23d..2baf59d6a7cb 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -50316,6 +50316,39 @@ static SDValue combineAnd(SDNode *N, SelectionDAG &DAG,
     }
   }
 
+  // InstCombine converts:
+  //    `(-x << C0) & C1`
+  // to
+  //    `(x * (Pow2_Ceil(C1) - (1 << C0))) & C1`
+  // This saves an IR instruction but on x86 the neg/shift version is preferable
+  // so undo the transform.
+
+  if (N0.getOpcode() == ISD::MUL && N0.hasOneUse()) {
+    // TODO: We don't actually need a splat for this, we just need the checks to
+    // hold for each element.
+    ConstantSDNode *N1C = isConstOrConstSplat(N1, /*AllowUndefs*/ true,
+                                              /*AllowTruncation*/ false);
+    ConstantSDNode *N01C =
+        isConstOrConstSplat(N0.getOperand(1), /*AllowUndefs*/ true,
+                            /*AllowTruncation*/ false);
+    if (N1C && N01C) {
+      const APInt &MulC = N01C->getAPIntValue();
+      const APInt &AndC = N1C->getAPIntValue();
+      APInt MulCLowBit = MulC & (-MulC);
+      if (MulC.uge(AndC) && !MulC.isPowerOf2() &&
+          (MulCLowBit + MulC).isPowerOf2()) {
+        SDValue Neg = DAG.getNode(ISD::SUB, dl, VT, DAG.getConstant(0, dl, VT),
+                                  N0.getOperand(0));
+        int32_t MulCLowBitLog = MulCLowBit.exactLogBase2();
+        assert(MulCLowBitLog != -1 &&
+               "Isolated lowbit is somehow not a power of 2!");
+        SDValue Shift = DAG.getNode(ISD::SHL, dl, VT, Neg,
+                                    DAG.getConstant(MulCLowBitLog, dl, VT));
+        return DAG.getNode(ISD::AND, dl, VT, Shift, N1);
+      }
+    }
+  }
+
   if (SDValue V = combineScalarAndWithMaskSetcc(N, DAG, Subtarget))
     return V;
 

diff  --git a/llvm/test/CodeGen/X86/undo-mul-and.ll b/llvm/test/CodeGen/X86/undo-mul-and.ll
index f573bdcd455c..c9c40099e546 100644
--- a/llvm/test/CodeGen/X86/undo-mul-and.ll
+++ b/llvm/test/CodeGen/X86/undo-mul-and.ll
@@ -6,7 +6,9 @@
 define i32 @mul_and_to_neg_shl_and(i32 %x) {
 ; CHECK-LABEL: mul_and_to_neg_shl_and:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    imull $56, %edi, %eax
+; CHECK-NEXT:    # kill: def $edi killed $edi def $rdi
+; CHECK-NEXT:    negl %edi
+; CHECK-NEXT:    leal (,%rdi,8), %eax
 ; CHECK-NEXT:    andl $56, %eax
 ; CHECK-NEXT:    retq
   %mul = mul i32 %x, 56
@@ -17,7 +19,9 @@ define i32 @mul_and_to_neg_shl_and(i32 %x) {
 define i32 @mul_and_to_neg_shl_and2(i32 %x) {
 ; CHECK-LABEL: mul_and_to_neg_shl_and2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    imull $56, %edi, %eax
+; CHECK-NEXT:    # kill: def $edi killed $edi def $rdi
+; CHECK-NEXT:    negl %edi
+; CHECK-NEXT:    leal (,%rdi,8), %eax
 ; CHECK-NEXT:    andl $48, %eax
 ; CHECK-NEXT:    retq
   %mul = mul i32 %x, 56
@@ -28,25 +32,26 @@ define i32 @mul_and_to_neg_shl_and2(i32 %x) {
 define <4 x i32> @mul_and_to_neg_shl_and_vec(<4 x i32> %x) {
 ; CHECK-SSE-LABEL: mul_and_to_neg_shl_and_vec:
 ; CHECK-SSE:       # %bb.0:
-; CHECK-SSE-NEXT:    movdqa {{.*#+}} xmm1 = [56,56,56,56]
-; CHECK-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; CHECK-SSE-NEXT:    pmuludq %xmm1, %xmm0
-; CHECK-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; CHECK-SSE-NEXT:    pmuludq %xmm1, %xmm2
-; CHECK-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
-; CHECK-SSE-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; CHECK-SSE-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE-NEXT:    pxor %xmm1, %xmm1
+; CHECK-SSE-NEXT:    psubd %xmm0, %xmm1
+; CHECK-SSE-NEXT:    pslld $3, %xmm1
+; CHECK-SSE-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE-NEXT:    movdqa %xmm1, %xmm0
 ; CHECK-SSE-NEXT:    retq
 ;
 ; CHECK-AVX1-LABEL: mul_and_to_neg_shl_and_vec:
 ; CHECK-AVX1:       # %bb.0:
-; CHECK-AVX1-NEXT:    vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; CHECK-AVX1-NEXT:    vpsubd %xmm0, %xmm1, %xmm0
+; CHECK-AVX1-NEXT:    vpslld $3, %xmm0, %xmm0
 ; CHECK-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX1-NEXT:    retq
 ;
 ; CHECK-AVX512-LABEL: mul_and_to_neg_shl_and_vec:
 ; CHECK-AVX512:       # %bb.0:
-; CHECK-AVX512-NEXT:    vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-AVX512-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; CHECK-AVX512-NEXT:    vpsubd %xmm0, %xmm1, %xmm0
+; CHECK-AVX512-NEXT:    vpslld $3, %xmm0, %xmm0
 ; CHECK-AVX512-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
 ; CHECK-AVX512-NEXT:    retq
   %mul = mul <4 x i32> %x, <i32 56, i32 56, i32 56, i32 56>
@@ -143,25 +148,26 @@ define <4 x i32> @mul_and_to_neg_shl_and_vec_todo_no_splat2(<4 x i32> %x) {
 define <4 x i32> @mul_and_to_neg_shl_and_vec_with_undef_mul(<4 x i32> %x) {
 ; CHECK-SSE-LABEL: mul_and_to_neg_shl_and_vec_with_undef_mul:
 ; CHECK-SSE:       # %bb.0:
-; CHECK-SSE-NEXT:    movdqa {{.*#+}} xmm1 = <56,56,56,u>
-; CHECK-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; CHECK-SSE-NEXT:    pmuludq %xmm1, %xmm0
-; CHECK-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; CHECK-SSE-NEXT:    pmuludq %xmm1, %xmm2
-; CHECK-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
-; CHECK-SSE-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; CHECK-SSE-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE-NEXT:    pxor %xmm1, %xmm1
+; CHECK-SSE-NEXT:    psubd %xmm0, %xmm1
+; CHECK-SSE-NEXT:    pslld $3, %xmm1
+; CHECK-SSE-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE-NEXT:    movdqa %xmm1, %xmm0
 ; CHECK-SSE-NEXT:    retq
 ;
 ; CHECK-AVX1-LABEL: mul_and_to_neg_shl_and_vec_with_undef_mul:
 ; CHECK-AVX1:       # %bb.0:
-; CHECK-AVX1-NEXT:    vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; CHECK-AVX1-NEXT:    vpsubd %xmm0, %xmm1, %xmm0
+; CHECK-AVX1-NEXT:    vpslld $3, %xmm0, %xmm0
 ; CHECK-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX1-NEXT:    retq
 ;
 ; CHECK-AVX512-LABEL: mul_and_to_neg_shl_and_vec_with_undef_mul:
 ; CHECK-AVX512:       # %bb.0:
-; CHECK-AVX512-NEXT:    vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-AVX512-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; CHECK-AVX512-NEXT:    vpsubd %xmm0, %xmm1, %xmm0
+; CHECK-AVX512-NEXT:    vpslld $3, %xmm0, %xmm0
 ; CHECK-AVX512-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
 ; CHECK-AVX512-NEXT:    retq
   %mul = mul <4 x i32> %x, <i32 56, i32 56, i32 56, i32 undef>
@@ -172,25 +178,26 @@ define <4 x i32> @mul_and_to_neg_shl_and_vec_with_undef_mul(<4 x i32> %x) {
 define <4 x i32> @mul_and_to_neg_shl_and_vec_with_undef_and(<4 x i32> %x) {
 ; CHECK-SSE-LABEL: mul_and_to_neg_shl_and_vec_with_undef_and:
 ; CHECK-SSE:       # %bb.0:
-; CHECK-SSE-NEXT:    movdqa {{.*#+}} xmm1 = [56,56,56,56]
-; CHECK-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; CHECK-SSE-NEXT:    pmuludq %xmm1, %xmm0
-; CHECK-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; CHECK-SSE-NEXT:    pmuludq %xmm1, %xmm2
-; CHECK-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
-; CHECK-SSE-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; CHECK-SSE-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE-NEXT:    pxor %xmm1, %xmm1
+; CHECK-SSE-NEXT:    psubd %xmm0, %xmm1
+; CHECK-SSE-NEXT:    pslld $3, %xmm1
+; CHECK-SSE-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE-NEXT:    movdqa %xmm1, %xmm0
 ; CHECK-SSE-NEXT:    retq
 ;
 ; CHECK-AVX1-LABEL: mul_and_to_neg_shl_and_vec_with_undef_and:
 ; CHECK-AVX1:       # %bb.0:
-; CHECK-AVX1-NEXT:    vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; CHECK-AVX1-NEXT:    vpsubd %xmm0, %xmm1, %xmm0
+; CHECK-AVX1-NEXT:    vpslld $3, %xmm0, %xmm0
 ; CHECK-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX1-NEXT:    retq
 ;
 ; CHECK-AVX512-LABEL: mul_and_to_neg_shl_and_vec_with_undef_and:
 ; CHECK-AVX512:       # %bb.0:
-; CHECK-AVX512-NEXT:    vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-AVX512-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; CHECK-AVX512-NEXT:    vpsubd %xmm0, %xmm1, %xmm0
+; CHECK-AVX512-NEXT:    vpslld $3, %xmm0, %xmm0
 ; CHECK-AVX512-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
 ; CHECK-AVX512-NEXT:    retq
   %mul = mul <4 x i32> %x, <i32 56, i32 56, i32 56, i32 56>
@@ -201,39 +208,20 @@ define <4 x i32> @mul_and_to_neg_shl_and_vec_with_undef_and(<4 x i32> %x) {
 define <16 x i8> @mul_and_to_neg_shl_and_vec_with_undef_mul_and(<16 x i8> %x) {
 ; CHECK-SSE-LABEL: mul_and_to_neg_shl_and_vec_with_undef_mul_and:
 ; CHECK-SSE:       # %bb.0:
-; CHECK-SSE-NEXT:    movdqa %xmm0, %xmm1
-; CHECK-SSE-NEXT:    punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; CHECK-SSE-NEXT:    pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; CHECK-SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
-; CHECK-SSE-NEXT:    pand %xmm2, %xmm1
-; CHECK-SSE-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; CHECK-SSE-NEXT:    pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-SSE-NEXT:    pand %xmm2, %xmm0
-; CHECK-SSE-NEXT:    packuswb %xmm1, %xmm0
-; CHECK-SSE-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE-NEXT:    pxor %xmm1, %xmm1
+; CHECK-SSE-NEXT:    psubb %xmm0, %xmm1
+; CHECK-SSE-NEXT:    psllw $2, %xmm1
+; CHECK-SSE-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE-NEXT:    movdqa %xmm1, %xmm0
 ; CHECK-SSE-NEXT:    retq
 ;
-; CHECK-AVX1-LABEL: mul_and_to_neg_shl_and_vec_with_undef_mul_and:
-; CHECK-AVX1:       # %bb.0:
-; CHECK-AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; CHECK-AVX1-NEXT:    vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; CHECK-AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
-; CHECK-AVX1-NEXT:    vpand %xmm2, %xmm1, %xmm1
-; CHECK-AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; CHECK-AVX1-NEXT:    vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; CHECK-AVX1-NEXT:    vpand %xmm2, %xmm0, %xmm0
-; CHECK-AVX1-NEXT:    vpackuswb %xmm1, %xmm0, %xmm0
-; CHECK-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; CHECK-AVX1-NEXT:    retq
-;
-; CHECK-AVX512-LABEL: mul_and_to_neg_shl_and_vec_with_undef_mul_and:
-; CHECK-AVX512:       # %bb.0:
-; CHECK-AVX512-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; CHECK-AVX512-NEXT:    vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; CHECK-AVX512-NEXT:    vpmovwb %ymm0, %xmm0
-; CHECK-AVX512-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; CHECK-AVX512-NEXT:    vzeroupper
-; CHECK-AVX512-NEXT:    retq
+; CHECK-AVX-LABEL: mul_and_to_neg_shl_and_vec_with_undef_mul_and:
+; CHECK-AVX:       # %bb.0:
+; CHECK-AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; CHECK-AVX-NEXT:    vpsubb %xmm0, %xmm1, %xmm0
+; CHECK-AVX-NEXT:    vpsllw $2, %xmm0, %xmm0
+; CHECK-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX-NEXT:    retq
   %mul = mul <16 x i8> %x, <i8 12, i8 12, i8 12, i8 12, i8 undef, i8 12, i8 12, i8 12, i8 12, i8 12, i8 12, i8 12, i8 12, i8 12, i8 12, i8 12>
   %and = and <16 x i8> %mul, <i8 11, i8 undef, i8 11, i8 11, i8 11, i8 11, i8 11, i8 11, i8 11, i8 11, i8 11, i8 11, i8 11, i8 11, i8 11, i8 11>
   ret <16 x i8> %and
@@ -272,5 +260,3 @@ define i32 @mul_and_to_neg_shl_and_fail_mask_to_large(i32 %x) {
   %and = and i32 %mul, 120
   ret i32 %and
 }
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; CHECK-AVX: {{.*}}

More information about the llvm-commits mailing list