[llvm] [SelectionDAG] Add MaskedValueIsZero check to allow folding of zero-extended variables we know are safe to extend (PR #85573)

via llvm-commits llvm-commits at lists.llvm.org
Mon Mar 18 12:44:45 PDT 2024


https://github.com/AtariDreams updated https://github.com/llvm/llvm-project/pull/85573

From 710a43d8454a9f1c6e0e57394644f3a538f81b6d Mon Sep 17 00:00:00 2001
From: Rose <gfunni234 at gmail.com>
Date: Sun, 17 Mar 2024 14:04:52 -0400
Subject: [PATCH 1/2] [SelectionDAG] Pre-commit tests (NFC)

---
 llvm/test/CodeGen/X86/dagcombine-shifts.ll | 134 +++++++++++++++++++++
 1 file changed, 134 insertions(+)

diff --git a/llvm/test/CodeGen/X86/dagcombine-shifts.ll b/llvm/test/CodeGen/X86/dagcombine-shifts.ll
index 42b325dd4c229e..229b2a22c0e5af 100644
--- a/llvm/test/CodeGen/X86/dagcombine-shifts.ll
+++ b/llvm/test/CodeGen/X86/dagcombine-shifts.ll
@@ -322,5 +322,139 @@ define void @g(i32 %a) nounwind {
   ret void
 }
 
+define i32 @shift_zext_shl(i8 zeroext %x) {
+; X86-LABEL: shift_zext_shl:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    andb $64, %al
+; X86-NEXT:    movzbl %al, %eax
+; X86-NEXT:    shll $9, %eax
+; X86-NEXT:    movzwl %ax, %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: shift_zext_shl:
+; X64:       # %bb.0:
+; X64-NEXT:    andb $64, %dil
+; X64-NEXT:    movzbl %dil, %eax
+; X64-NEXT:    shll $9, %eax
+; X64-NEXT:    movzwl %ax, %eax
+; X64-NEXT:    retq
+  %a = and i8 %x, 64
+  %b = zext i8 %a to i16
+  %c = shl i16 %b, 9
+  %d = zext i16 %c to i32
+  ret i32 %d
+}
+
+define i32 @shift_zext_shl2(i8 zeroext %x) {
+; X86-LABEL: shift_zext_shl2:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    andl $64, %eax
+; X86-NEXT:    shll $9, %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: shift_zext_shl2:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    andl $64, %eax
+; X64-NEXT:    shll $9, %eax
+; X64-NEXT:    retq
+  %a = and i8 %x, 64
+  %b = zext i8 %a to i32
+  %c = shl i32 %b, 9
+  ret i32 %c
+}
+
+define <4 x i32> @shift_zext_shl_vec(<4 x i8> %x) nounwind {
+; X86-LABEL: shift_zext_shl_vec:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebx
+; X86-NEXT:    pushl %edi
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ebx
+; X86-NEXT:    andb $64, %bl
+; X86-NEXT:    movzbl %bl, %edi
+; X86-NEXT:    shll $9, %edi
+; X86-NEXT:    movzwl %di, %edi
+; X86-NEXT:    andl $63, %ecx
+; X86-NEXT:    shll $8, %ecx
+; X86-NEXT:    andl $31, %edx
+; X86-NEXT:    shll $7, %edx
+; X86-NEXT:    andl $23, %esi
+; X86-NEXT:    shll $6, %esi
+; X86-NEXT:    movl %esi, 12(%eax)
+; X86-NEXT:    movl %edx, 8(%eax)
+; X86-NEXT:    movl %ecx, 4(%eax)
+; X86-NEXT:    movl %edi, (%eax)
+; X86-NEXT:    popl %esi
+; X86-NEXT:    popl %edi
+; X86-NEXT:    popl %ebx
+; X86-NEXT:    retl $4
+;
+; X64-LABEL: shift_zext_shl_vec:
+; X64:       # %bb.0:
+; X64-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-NEXT:    pxor %xmm1, %xmm1
+; X64-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; X64-NEXT:    pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; X64-NEXT:    retq
+  %a = and <4 x i8> %x, <i8 64, i8 63, i8 31, i8 23>
+  %b = zext <4 x i8> %a to <4 x i16>
+  %c = shl <4 x i16> %b, <i16 9, i16 8, i16 7, i16 6>
+  %d = zext <4 x i16> %c to <4 x i32>
+  ret <4 x i32> %d
+}
+
+define <4 x i32> @shift_zext_shl2_vec(<4 x i8> %x) nounwind {
+; X86-LABEL: shift_zext_shl2_vec:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %edi
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    andl $23, %edi
+; X86-NEXT:    andl $31, %esi
+; X86-NEXT:    andl $63, %edx
+; X86-NEXT:    andl $64, %ecx
+; X86-NEXT:    shll $9, %ecx
+; X86-NEXT:    shll $8, %edx
+; X86-NEXT:    shll $7, %esi
+; X86-NEXT:    shll $6, %edi
+; X86-NEXT:    movl %edi, 12(%eax)
+; X86-NEXT:    movl %esi, 8(%eax)
+; X86-NEXT:    movl %edx, 4(%eax)
+; X86-NEXT:    movl %ecx, (%eax)
+; X86-NEXT:    popl %esi
+; X86-NEXT:    popl %edi
+; X86-NEXT:    retl $4
+;
+; X64-LABEL: shift_zext_shl2_vec:
+; X64:       # %bb.0:
+; X64-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-NEXT:    pxor %xmm1, %xmm1
+; X64-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; X64-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; X64-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; X64-NEXT:    pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; X64-NEXT:    pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; X64-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; X64-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; X64-NEXT:    retq
+  %a = and <4 x i8> %x, <i8 64, i8 63, i8 31, i8 23>
+  %b = zext <4 x i8> %a to <4 x i32>
+  %c = shl <4 x i32> %b, <i32 9, i32 8, i32 7, i32 6>
+  ret <4 x i32> %c
+}
+
 declare dso_local void @f(i64)
 

From d8d1ac1d84e28e9bbab0bc172dc65e080c8d3791 Mon Sep 17 00:00:00 2001
From: Rose <gfunni234 at gmail.com>
Date: Sun, 17 Mar 2024 12:51:41 -0400
Subject: [PATCH 2/2] [SelectionDAG] Add MaskedValueIsZero check to allow
 folding of zero-extended variables we know are safe to extend

Build a mask with a one in every high bit that the shift would clear.

This lets us use the known bits of a variable to prove that the shift cannot lose any set bits, so the fold remains safe even when the shift amount exceeds the width difference between the two types.
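
For the scalar shift_zext_shl test case below, a minimal standalone
sketch of what the new check computes (the KnownBits are built by hand
here purely for illustration; the real code goes through
DAG.MaskedValueIsZero, which queries computeKnownBits):

  #include "llvm/ADT/APInt.h"
  #include "llvm/Support/KnownBits.h"
  #include <cassert>

  using namespace llvm;

  int main() {
    // ShVal = (zext i8 (and i8 %x, 64) to i16), shifted left by 9.
    const unsigned BitWidth = 16, ShAmt = 9;

    // Thanks to `and i8 %x, 64`, only bit 6 of ShVal can be nonzero.
    KnownBits Known(BitWidth);
    Known.Zero = ~APInt(BitWidth, 64);

    // Old check: ShAmt (9) > zext headroom (16 - 8 = 8), so give up.
    // New check: set a one in every bit position the shift would push
    // out of the i16, i.e. bits 7..15.
    APInt ShiftOutMask = APInt::getHighBitsSet(BitWidth, ShAmt);

    // This is what MaskedValueIsZero tests: every bit of the mask must
    // be a known-zero bit of ShVal. Bit 6 << 9 lands at bit 15, still
    // inside the i16, so the fold is safe.
    bool Safe = (Known.Zero & ShiftOutMask) == ShiftOutMask;
    assert(Safe);
    return Safe ? 0 : 1;
  }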
---
 llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 16 ++++++--
 llvm/test/CodeGen/X86/dagcombine-shifts.ll    | 39 ++++++++-----------
 2 files changed, 29 insertions(+), 26 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 351041780b8547..1d8ca71fbaff56 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -13802,11 +13802,21 @@ SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {
         if (N0.getOpcode() == ISD::SHL) {
           // If the original shl may be shifting out bits, do not perform this
           // transformation.
-          // TODO: Add MaskedValueIsZero check.
           unsigned KnownZeroBits = ShVal.getValueSizeInBits() -
                                    ShVal.getOperand(0).getValueSizeInBits();
-          if (ShAmtC->getAPIntValue().ugt(KnownZeroBits))
-            return SDValue();
+          if (ShAmtC->getAPIntValue().ugt(KnownZeroBits)) {
+            // If the shift is too large, then see if we can deduce that the
+            // shift is safe anyway.
+            // Create a mask that has ones for the bits being shifted out.
+            APInt ShiftOutMask = APInt::getHighBitsSet(
+                ShVal.getValueSizeInBits(),
+                ShAmtC->getAPIntValue().getZExtValue());
+
+            // Check if the bits being shifted out are known to be zero.
+            if (!DAG.MaskedValueIsZero(ShVal, ShiftOutMask)) {
+              return SDValue();
+            }
+          }
         }
 
         // Ensure that the shift amount is wide enough for the shifted value.
diff --git a/llvm/test/CodeGen/X86/dagcombine-shifts.ll b/llvm/test/CodeGen/X86/dagcombine-shifts.ll
index 229b2a22c0e5af..734abfe55a4ec4 100644
--- a/llvm/test/CodeGen/X86/dagcombine-shifts.ll
+++ b/llvm/test/CodeGen/X86/dagcombine-shifts.ll
@@ -326,18 +326,15 @@ define i32 @shift_zext_shl(i8 zeroext %x) {
 ; X86-LABEL: shift_zext_shl:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    andb $64, %al
-; X86-NEXT:    movzbl %al, %eax
+; X86-NEXT:    andl $64, %eax
 ; X86-NEXT:    shll $9, %eax
-; X86-NEXT:    movzwl %ax, %eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: shift_zext_shl:
 ; X64:       # %bb.0:
-; X64-NEXT:    andb $64, %dil
-; X64-NEXT:    movzbl %dil, %eax
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    andl $64, %eax
 ; X64-NEXT:    shll $9, %eax
-; X64-NEXT:    movzwl %ax, %eax
 ; X64-NEXT:    retq
   %a = and i8 %x, 64
   %b = zext i8 %a to i16
@@ -369,31 +366,27 @@ define i32 @shift_zext_shl2(i8 zeroext %x) {
 define <4 x i32> @shift_zext_shl_vec(<4 x i8> %x) nounwind {
 ; X86-LABEL: shift_zext_shl_vec:
 ; X86:       # %bb.0:
-; X86-NEXT:    pushl %ebx
 ; X86-NEXT:    pushl %edi
 ; X86-NEXT:    pushl %esi
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %edi
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %esi
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %edx
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ebx
-; X86-NEXT:    andb $64, %bl
-; X86-NEXT:    movzbl %bl, %edi
-; X86-NEXT:    shll $9, %edi
-; X86-NEXT:    movzwl %di, %edi
-; X86-NEXT:    andl $63, %ecx
-; X86-NEXT:    shll $8, %ecx
-; X86-NEXT:    andl $31, %edx
-; X86-NEXT:    shll $7, %edx
-; X86-NEXT:    andl $23, %esi
-; X86-NEXT:    shll $6, %esi
-; X86-NEXT:    movl %esi, 12(%eax)
-; X86-NEXT:    movl %edx, 8(%eax)
-; X86-NEXT:    movl %ecx, 4(%eax)
-; X86-NEXT:    movl %edi, (%eax)
+; X86-NEXT:    andl $64, %ecx
+; X86-NEXT:    shll $9, %ecx
+; X86-NEXT:    andl $63, %edx
+; X86-NEXT:    shll $8, %edx
+; X86-NEXT:    andl $31, %esi
+; X86-NEXT:    shll $7, %esi
+; X86-NEXT:    andl $23, %edi
+; X86-NEXT:    shll $6, %edi
+; X86-NEXT:    movl %edi, 12(%eax)
+; X86-NEXT:    movl %esi, 8(%eax)
+; X86-NEXT:    movl %edx, 4(%eax)
+; X86-NEXT:    movl %ecx, (%eax)
 ; X86-NEXT:    popl %esi
 ; X86-NEXT:    popl %edi
-; X86-NEXT:    popl %ebx
 ; X86-NEXT:    retl $4
 ;
 ; X64-LABEL: shift_zext_shl_vec:


