[llvm] [SelectionDAG] Add MaskedValueIsZero check to allow folding of zero extended variables we know are safe to extend (PR #85573)

via llvm-commits llvm-commits at lists.llvm.org
Sun Mar 17 18:48:40 PDT 2024


https://github.com/AtariDreams updated https://github.com/llvm/llvm-project/pull/85573

From d65b00f494845e0df974e5a679edd5cb9819e782 Mon Sep 17 00:00:00 2001
From: Rose <gfunni234 at gmail.com>
Date: Sun, 17 Mar 2024 14:04:52 -0400
Subject: [PATCH 1/2] [SelectionDAG] Pre-commit tests (NFC)

---
 llvm/test/CodeGen/X86/dagcombine-shifts.ll | 44 ++++++++++++++++++++++
 1 file changed, 44 insertions(+)

diff --git a/llvm/test/CodeGen/X86/dagcombine-shifts.ll b/llvm/test/CodeGen/X86/dagcombine-shifts.ll
index 42b325dd4c229e..0795e99669c6e0 100644
--- a/llvm/test/CodeGen/X86/dagcombine-shifts.ll
+++ b/llvm/test/CodeGen/X86/dagcombine-shifts.ll
@@ -322,5 +322,49 @@ define void @g(i32 %a) nounwind {
   ret void
 }
 
+define i32 @shift_zext_shl(i8 zeroext %x) {
+; X86-LABEL: shift_zext_shl:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    andb $64, %al
+; X86-NEXT:    movzbl %al, %eax
+; X86-NEXT:    shll $9, %eax
+; X86-NEXT:    movzwl %ax, %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: shift_zext_shl:
+; X64:       # %bb.0:
+; X64-NEXT:    andb $64, %dil
+; X64-NEXT:    movzbl %dil, %eax
+; X64-NEXT:    shll $9, %eax
+; X64-NEXT:    movzwl %ax, %eax
+; X64-NEXT:    retq
+  %1 = and i8 %x, 64
+  %2 = zext i8 %1 to i16
+  %3 = shl i16 %2, 9
+  %4 = zext i16 %3 to i32
+  ret i32 %4
+}
+
+define i32 @shift_zext_shl2(i8 zeroext %x) {
+; X86-LABEL: shift_zext_shl2:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    andl $64, %eax
+; X86-NEXT:    shll $9, %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: shift_zext_shl2:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    andl $64, %eax
+; X64-NEXT:    shll $9, %eax
+; X64-NEXT:    retq
+  %1 = and i8 %x, 64
+  %2 = zext i8 %1 to i32
+  %3 = shl i32 %2, 9
+  ret i32 %3
+}
+
 declare dso_local void @f(i64)
 

From 035fec61b01a9cf623225af07a3cfd415cf21e2c Mon Sep 17 00:00:00 2001
From: Rose <gfunni234 at gmail.com>
Date: Sun, 17 Mar 2024 12:51:41 -0400
Subject: [PATCH 2/2] [SelectionDAG] Add MaskedValueIsZero check to allow
 folding of zero extended variables we know are safe to extend

Build a mask with ones for every high bit that will be cleared by the shift.

This lets us use the known bits of the shifted value to prove that no set bits are shifted out, even when the shift amount is greater than the width difference between the source and destination types.
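
To make the reasoning concrete, here is a small standalone sketch of the check for the shift_zext_shl pattern from the tests (plain llvm::APInt arithmetic rather than the SelectionDAG API; the variable names are only illustrative): the shifted value is (zext i8 (and i8 %x, 64)) to i16, so only bit 6 can ever be set, and shifting left by 9 cannot push a set bit past bit 15 even though 9 exceeds the 8-bit width difference.

// Standalone sketch of the MaskedValueIsZero reasoning; it uses only
// llvm::APInt, not the DAGCombiner itself.
#include "llvm/ADT/APInt.h"
#include <cassert>

using namespace llvm;

int main() {
  // Known-zero bits of (zext i8 (and i8 %x, 64)) to i16: everything but bit 6.
  APInt KnownZero = ~APInt(16, 64);

  // Bits that `shl i16 %v, 9` would shift out of the 16-bit type: the top 9
  // bits, mirroring APInt::getHighBitsSet in the patch.
  APInt ShiftOutMask = APInt::getHighBitsSet(/*numBits=*/16, /*hiBitsSet=*/9);

  // MaskedValueIsZero succeeds exactly when every masked bit is known zero.
  bool FoldIsSafe = ShiftOutMask.isSubsetOf(KnownZero);
  assert(FoldIsSafe && "bit 6 shifted left by 9 still fits in i16");
  return FoldIsSafe ? 0 : 1;
}

In the combiner itself the known-zero bits come from DAG.computeKnownBits (queried through DAG.MaskedValueIsZero), so the same subset test is what allows the fold for shift_zext_shl while still rejecting shifts that would discard set bits.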
---
 llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 16 +++++++++++++---
 llvm/test/CodeGen/X86/dagcombine-shifts.ll    |  9 +++------
 2 files changed, 16 insertions(+), 9 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 5eb53d57c9c2bf..711bc2666d6623 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -13802,11 +13802,21 @@ SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {
         if (N0.getOpcode() == ISD::SHL) {
           // If the original shl may be shifting out bits, do not perform this
           // transformation.
-          // TODO: Add MaskedValueIsZero check.
           unsigned KnownZeroBits = ShVal.getValueSizeInBits() -
                                    ShVal.getOperand(0).getValueSizeInBits();
-          if (ShAmtC->getAPIntValue().ugt(KnownZeroBits))
-            return SDValue();
+          if (ShAmtC->getAPIntValue().ugt(KnownZeroBits)) {
+            // If the shift is too large, then see if we can deduce that the
+            // shift is safe anyway.
+            // Create a mask that has ones for the bits being shifted out.
+            llvm::APInt ShiftOutMask = llvm::APInt::getHighBitsSet(
+                ShVal.getValueSizeInBits(),
+                ShAmtC->getAPIntValue().getZExtValue());
+
+            // Check if the bits being shifted out are known to be zero.
+            if (!DAG.MaskedValueIsZero(ShVal, ShiftOutMask)) {
+              return SDValue();
+            }
+          }
         }
 
         // Ensure that the shift amount is wide enough for the shifted value.
diff --git a/llvm/test/CodeGen/X86/dagcombine-shifts.ll b/llvm/test/CodeGen/X86/dagcombine-shifts.ll
index 0795e99669c6e0..2322277aef2e32 100644
--- a/llvm/test/CodeGen/X86/dagcombine-shifts.ll
+++ b/llvm/test/CodeGen/X86/dagcombine-shifts.ll
@@ -326,18 +326,15 @@ define i32 @shift_zext_shl(i8 zeroext %x) {
 ; X86-LABEL: shift_zext_shl:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    andb $64, %al
-; X86-NEXT:    movzbl %al, %eax
+; X86-NEXT:    andl $64, %eax
 ; X86-NEXT:    shll $9, %eax
-; X86-NEXT:    movzwl %ax, %eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: shift_zext_shl:
 ; X64:       # %bb.0:
-; X64-NEXT:    andb $64, %dil
-; X64-NEXT:    movzbl %dil, %eax
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    andl $64, %eax
 ; X64-NEXT:    shll $9, %eax
-; X64-NEXT:    movzwl %ax, %eax
 ; X64-NEXT:    retq
   %1 = and i8 %x, 64
   %2 = zext i8 %1 to i16


