[llvm] [SelectionDAG] Let ComputeKnownSignBits handle (shl (ext X), C) (PR #97695)
Björn Pettersson via llvm-commits
llvm-commits at lists.llvm.org
Fri Jul 5 11:05:25 PDT 2024
https://github.com/bjope updated https://github.com/llvm/llvm-project/pull/97695
From ce0ec52a8b8c4d638ee7cc7d9ae42bc2efad80d1 Mon Sep 17 00:00:00 2001
From: Bjorn Pettersson <bjorn.a.pettersson at ericsson.com>
Date: Thu, 4 Jul 2024 18:02:16 +0200
Subject: [PATCH 1/4] [CodeGen] Pre-commit test case related to
ComputeNumSignBits for SHL
Add test cases that show the possibility of looking through
ZERO_EXTEND/ANY_EXTEND when computing the number of sign bits for an
SHL node. If all extended bits are shifted out, we can analyze the
operand that is extended.
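As a worked example of the reasoning the tests rely on: ashr i8 %x, 5
leaves at least 6 sign bits in the i8 value, so after zext to i16 and
shl by 10 all 8 extended bits have been shifted out and the result has
8 + 6 - 10 = 4 known sign bits. Hence sshl.sat by 1, 2 or 3 should fold
to a plain shl, while sshl.sat by 4 should not.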
---
.../CodeGen/X86/computenumsignbits-shl.ll | 191 ++++++++++++++++++
1 file changed, 191 insertions(+)
create mode 100644 llvm/test/CodeGen/X86/computenumsignbits-shl.ll
diff --git a/llvm/test/CodeGen/X86/computenumsignbits-shl.ll b/llvm/test/CodeGen/X86/computenumsignbits-shl.ll
new file mode 100644
index 0000000000000..5799bb653ebc6
--- /dev/null
+++ b/llvm/test/CodeGen/X86/computenumsignbits-shl.ll
@@ -0,0 +1,191 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s --check-prefix=X64
+
+; Verify that we can look through a ZERO_EXTEND/ANY_EXTEND when doing
+; ComputeNumSignBits for SHL.
+; We use the (sshlsat x, c) -> (shl x, c) fold as verification.
+; That fold should happen if c is less than the number of sign bits in x
+
+define void @computeNumSignBits_shl_zext_1(i8 %x, ptr %p) nounwind {
+; X64-LABEL: computeNumSignBits_shl_zext_1:
+; X64: # %bb.0:
+; X64-NEXT: sarb $5, %dil
+; X64-NEXT: movzbl %dil, %eax
+; X64-NEXT: movl %eax, %ecx
+; X64-NEXT: shll $10, %ecx
+; X64-NEXT: xorl %edx, %edx
+; X64-NEXT: testw %cx, %cx
+; X64-NEXT: sets %dl
+; X64-NEXT: addl $32767, %edx # imm = 0x7FFF
+; X64-NEXT: movl %eax, %edi
+; X64-NEXT: shll $11, %edi
+; X64-NEXT: movswl %di, %r8d
+; X64-NEXT: shrl %r8d
+; X64-NEXT: cmpw %r8w, %cx
+; X64-NEXT: cmovnel %edx, %edi
+; X64-NEXT: movw %di, (%rsi)
+; X64-NEXT: movl %eax, %edi
+; X64-NEXT: shll $12, %edi
+; X64-NEXT: movswl %di, %r8d
+; X64-NEXT: shrl $2, %r8d
+; X64-NEXT: cmpw %r8w, %cx
+; X64-NEXT: cmovnel %edx, %edi
+; X64-NEXT: movw %di, (%rsi)
+; X64-NEXT: shll $13, %eax
+; X64-NEXT: movswl %ax, %edi
+; X64-NEXT: shrl $3, %edi
+; X64-NEXT: cmpw %di, %cx
+; X64-NEXT: cmovnel %edx, %eax
+; X64-NEXT: movw %ax, (%rsi)
+; X64-NEXT: retq
+ %ashr = ashr i8 %x, 5
+ %zext = zext i8 %ashr to i16
+ %nsb4 = shl i16 %zext, 10
+ ; Expecting (sshlsat x, c) -> (shl x, c) fold.
+ %tmp1 = call i16 @llvm.sshl.sat.i16(i16 %nsb4, i16 1)
+ store volatile i16 %tmp1, ptr %p
+ ; Expecting (sshlsat x, c) -> (shl x, c) fold.
+ %tmp2 = call i16 @llvm.sshl.sat.i16(i16 %nsb4, i16 2)
+ store volatile i16 %tmp2, ptr %p
+ ; Expecting (sshlsat x, c) -> (shl x, c) fold.
+ %tmp3 = call i16 @llvm.sshl.sat.i16(i16 %nsb4, i16 3)
+ store volatile i16 %tmp3, ptr %p
+ ret void
+}
+
+define void @computeNumSignBits_shl_zext_2(i8 %x, ptr %p) nounwind {
+; X64-LABEL: computeNumSignBits_shl_zext_2:
+; X64: # %bb.0:
+; X64-NEXT: sarb $5, %dil
+; X64-NEXT: movzbl %dil, %eax
+; X64-NEXT: movl %eax, %ecx
+; X64-NEXT: shll $10, %ecx
+; X64-NEXT: xorl %edx, %edx
+; X64-NEXT: testw %cx, %cx
+; X64-NEXT: sets %dl
+; X64-NEXT: addl $32767, %edx # imm = 0x7FFF
+; X64-NEXT: shll $14, %eax
+; X64-NEXT: movswl %ax, %edi
+; X64-NEXT: shrl $4, %edi
+; X64-NEXT: cmpw %di, %cx
+; X64-NEXT: cmovnel %edx, %eax
+; X64-NEXT: movw %ax, (%rsi)
+; X64-NEXT: retq
+ %ashr = ashr i8 %x, 5
+ %zext = zext i8 %ashr to i16
+ %nsb4 = shl i16 %zext, 10
+ ; 4 sign bits. Not expecting (sshlsat x, c) -> (shl x, c) fold.
+ %tmp4 = call i16 @llvm.sshl.sat.i16(i16 %nsb4, i16 4)
+ store volatile i16 %tmp4, ptr %p
+ ret void
+}
+
+define void @computeNumSignBits_shl_zext_vec_1(<2 x i8> %x, ptr %p) nounwind {
+; X64-LABEL: computeNumSignBits_shl_zext_vec_1:
+; X64: # %bb.0:
+; X64-NEXT: psrlw $5, %xmm0
+; X64-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-NEXT: movdqa {{.*#+}} xmm1 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4]
+; X64-NEXT: pxor %xmm1, %xmm0
+; X64-NEXT: psubb %xmm1, %xmm0
+; X64-NEXT: pxor %xmm1, %xmm1
+; X64-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; X64-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1024,4096,u,u,u,u,u,u]
+; X64-NEXT: movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
+; X64-NEXT: pand %xmm0, %xmm2
+; X64-NEXT: pcmpgtw %xmm0, %xmm1
+; X64-NEXT: pandn {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; X64-NEXT: por %xmm2, %xmm1
+; X64-NEXT: movdqa %xmm0, %xmm2
+; X64-NEXT: paddw %xmm0, %xmm2
+; X64-NEXT: movdqa %xmm2, %xmm3
+; X64-NEXT: psraw $1, %xmm3
+; X64-NEXT: pcmpeqw %xmm0, %xmm3
+; X64-NEXT: movdqa %xmm3, %xmm0
+; X64-NEXT: pandn %xmm1, %xmm0
+; X64-NEXT: pand %xmm2, %xmm3
+; X64-NEXT: por %xmm0, %xmm3
+; X64-NEXT: movd %xmm3, (%rdi)
+; X64-NEXT: retq
+ %ashr = ashr <2 x i8> %x, <i8 5, i8 5>
+ %zext = zext <2 x i8> %ashr to <2 x i16>
+ %nsb4_2 = shl <2 x i16> %zext, <i16 10, i16 12>
+ ; Expecting (sshlsat x, c) -> (shl x, c) fold.
+ %tmp1 = call <2 x i16> @llvm.sshl.sat.v2i16(<2 x i16> %nsb4_2, <2 x i16> <i16 1, i16 1>)
+ store volatile <2 x i16> %tmp1, ptr %p
+ ret void
+}
+
+define void @computeNumSignBits_shl_zext_vec_2(<2 x i8> %x, ptr %p) nounwind {
+; X64-LABEL: computeNumSignBits_shl_zext_vec_2:
+; X64: # %bb.0:
+; X64-NEXT: psrlw $5, %xmm0
+; X64-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-NEXT: movdqa {{.*#+}} xmm1 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4]
+; X64-NEXT: pxor %xmm1, %xmm0
+; X64-NEXT: psubb %xmm1, %xmm0
+; X64-NEXT: pxor %xmm1, %xmm1
+; X64-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; X64-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1024,4096,u,u,u,u,u,u]
+; X64-NEXT: movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
+; X64-NEXT: pand %xmm0, %xmm2
+; X64-NEXT: pcmpgtw %xmm0, %xmm1
+; X64-NEXT: pandn {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; X64-NEXT: por %xmm2, %xmm1
+; X64-NEXT: movdqa %xmm0, %xmm2
+; X64-NEXT: psllw $2, %xmm2
+; X64-NEXT: movdqa %xmm2, %xmm3
+; X64-NEXT: psraw $2, %xmm3
+; X64-NEXT: pcmpeqw %xmm0, %xmm3
+; X64-NEXT: movdqa %xmm3, %xmm0
+; X64-NEXT: pandn %xmm1, %xmm0
+; X64-NEXT: pand %xmm2, %xmm3
+; X64-NEXT: por %xmm0, %xmm3
+; X64-NEXT: movd %xmm3, (%rdi)
+; X64-NEXT: retq
+ %ashr = ashr <2 x i8> %x, <i8 5, i8 5>
+ %zext = zext <2 x i8> %ashr to <2 x i16>
+ %nsb4_2 = shl <2 x i16> %zext, <i16 10, i16 12>
+ ; Not expecting (sshlsat x, c) -> (shl x, c) fold.
+ ; Because only 2 sign bits in element 1.
+ %tmp1 = call <2 x i16> @llvm.sshl.sat.v2i16(<2 x i16> %nsb4_2, <2 x i16> <i16 2, i16 2>)
+ store volatile <2 x i16> %tmp1, ptr %p
+ ret void
+}
+
+define void @computeNumSignBits_shl_zext_vec_3(<2 x i8> %x, ptr %p) nounwind {
+; X64-LABEL: computeNumSignBits_shl_zext_vec_3:
+; X64: # %bb.0:
+; X64-NEXT: psrlw $5, %xmm0
+; X64-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-NEXT: movdqa {{.*#+}} xmm1 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4]
+; X64-NEXT: pxor %xmm1, %xmm0
+; X64-NEXT: psubb %xmm1, %xmm0
+; X64-NEXT: pxor %xmm1, %xmm1
+; X64-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; X64-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [16384,4096,u,u,u,u,u,u]
+; X64-NEXT: movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
+; X64-NEXT: pand %xmm0, %xmm2
+; X64-NEXT: pcmpgtw %xmm0, %xmm1
+; X64-NEXT: pandn {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; X64-NEXT: por %xmm2, %xmm1
+; X64-NEXT: movdqa %xmm0, %xmm2
+; X64-NEXT: paddw %xmm0, %xmm2
+; X64-NEXT: movdqa %xmm2, %xmm3
+; X64-NEXT: psraw $1, %xmm3
+; X64-NEXT: pcmpeqw %xmm0, %xmm3
+; X64-NEXT: movdqa %xmm3, %xmm0
+; X64-NEXT: pandn %xmm1, %xmm0
+; X64-NEXT: pand %xmm2, %xmm3
+; X64-NEXT: por %xmm0, %xmm3
+; X64-NEXT: movd %xmm3, (%rdi)
+; X64-NEXT: retq
+ %ashr = ashr <2 x i8> %x, <i8 5, i8 5>
+ %zext = zext <2 x i8> %ashr to <2 x i16>
+ %nsb1_2 = shl <2 x i16> %zext, <i16 14, i16 12>
+ ; Not expecting (sshlsat x, c) -> (shl x, c) fold.
+ ; Because all sign bits shifted out for element 0
+ %tmp1 = call <2 x i16> @llvm.sshl.sat.v2i16(<2 x i16> %nsb1_2, <2 x i16> <i16 1, i16 1>)
+ store volatile <2 x i16> %tmp1, ptr %p
+ ret void
+}
From 6ca9ff2a98f6041f59d1426737cda6818c33cd0f Mon Sep 17 00:00:00 2001
From: Bjorn Pettersson <bjorn.a.pettersson at ericsson.com>
Date: Thu, 4 Jul 2024 10:34:04 +0200
Subject: [PATCH 2/4] [SelectionDAG] Let ComputeKnownSignBits handle (shl (ext
X), C)
Add simple support for looking through ZEXT/ANYEXT/SEXT when doing
ComputeKnownSignBits for SHL. This is valid for the case when all
extended bits are shifted out, because then the number of sign bits
can be found by analysing the EXT operand.

A future improvement could be to pass along the "shifted left by"
information in the recursive calls to ComputeKnownSignBits, allowing
us to handle this more generically.
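A minimal standalone sketch of the rule, using made-up names rather than
the actual SelectionDAG helpers, for illustration only:

#include <cstdint>
#include <optional>

// Given that every possible shift amount lies in [MinShAmt, MaxShAmt],
// the extend adds ExtensionWidth bits, and the extendee has
// ExtendeeSignBits known sign bits, return a lower bound on the number
// of sign bits of (shl (ext X), C), or nullopt if nothing can be said.
std::optional<uint64_t> signBitsOfShlOfExt(uint64_t ExtendeeSignBits,
                                           uint64_t ExtensionWidth,
                                           uint64_t MinShAmt,
                                           uint64_t MaxShAmt) {
  // Only valid when all extended bits are guaranteed to be shifted out;
  // then zext/sext/anyext all produce the same result bits.
  if (ExtensionWidth > MinShAmt)
    return std::nullopt;
  // Treat the extendee as if it had been sign extended into the wide type...
  uint64_t Tmp = ExtensionWidth + ExtendeeSignBits;
  // ...and subtract the sign bits destroyed by the left shift.
  if (MaxShAmt < Tmp)
    return Tmp - MaxShAmt;
  return std::nullopt; // The shift may destroy all sign bits.
}

For computeNumSignBits_shl_zext_1 this gives 8 + 6 = 14 sign bits before
the shift and 14 - 10 = 4 after it, which is why the sshl.sat calls with
shift amounts 1 to 3 now fold to plain shl.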
---
.../lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 24 +++++++--
.../CodeGen/X86/computenumsignbits-shl.ll | 49 +++----------------
2 files changed, 29 insertions(+), 44 deletions(-)
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 96242305e9eab..152dee9c2f78f 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -4615,12 +4615,30 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
Tmp = std::min<uint64_t>(Tmp + *ShAmt, VTBits);
return Tmp;
case ISD::SHL:
- if (std::optional<uint64_t> ShAmt =
+ if (std::optional<uint64_t> MaxShAmt =
getValidMaximumShiftAmount(Op, DemandedElts, Depth + 1)) {
+ if (Op.getOperand(0).getOpcode() == ISD::ANY_EXTEND ||
+ Op.getOperand(0).getOpcode() == ISD::ZERO_EXTEND ||
+ Op.getOperand(0).getOpcode() == ISD::SIGN_EXTEND)
+ if (std::optional<uint64_t> MinShAmt =
+ getValidMinimumShiftAmount(Op, DemandedElts, Depth + 1)) {
+ SDValue Src = Op.getOperand(0);
+ EVT SrcVT = Src.getValueType();
+ SDValue ExtendedOp = Op.getOperand(0).getOperand(0);
+ EVT ExtendedOpVT = ExtendedOp.getValueType();
+ uint64_t ExtendedWidth =
+ SrcVT.getScalarSizeInBits() - ExtendedOpVT.getScalarSizeInBits();
+ if (ExtendedWidth <= *MinShAmt) {
+ Tmp = ComputeNumSignBits(ExtendedOp, DemandedElts, Depth + 1);
+ Tmp += ExtendedWidth;
+ if (*MaxShAmt < Tmp)
+ return Tmp - *MaxShAmt;
+ }
+ }
// shl destroys sign bits, ensure it doesn't shift out all sign bits.
Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
- if (*ShAmt < Tmp)
- return Tmp - *ShAmt;
+ if (*MaxShAmt < Tmp)
+ return Tmp - *MaxShAmt;
}
break;
case ISD::AND:
diff --git a/llvm/test/CodeGen/X86/computenumsignbits-shl.ll b/llvm/test/CodeGen/X86/computenumsignbits-shl.ll
index 5799bb653ebc6..473fecc307ed4 100644
--- a/llvm/test/CodeGen/X86/computenumsignbits-shl.ll
+++ b/llvm/test/CodeGen/X86/computenumsignbits-shl.ll
@@ -12,30 +12,12 @@ define void @computeNumSignBits_shl_zext_1(i8 %x, ptr %p) nounwind {
; X64-NEXT: sarb $5, %dil
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: movl %eax, %ecx
-; X64-NEXT: shll $10, %ecx
-; X64-NEXT: xorl %edx, %edx
-; X64-NEXT: testw %cx, %cx
-; X64-NEXT: sets %dl
-; X64-NEXT: addl $32767, %edx # imm = 0x7FFF
-; X64-NEXT: movl %eax, %edi
-; X64-NEXT: shll $11, %edi
-; X64-NEXT: movswl %di, %r8d
-; X64-NEXT: shrl %r8d
-; X64-NEXT: cmpw %r8w, %cx
-; X64-NEXT: cmovnel %edx, %edi
-; X64-NEXT: movw %di, (%rsi)
-; X64-NEXT: movl %eax, %edi
-; X64-NEXT: shll $12, %edi
-; X64-NEXT: movswl %di, %r8d
-; X64-NEXT: shrl $2, %r8d
-; X64-NEXT: cmpw %r8w, %cx
-; X64-NEXT: cmovnel %edx, %edi
-; X64-NEXT: movw %di, (%rsi)
+; X64-NEXT: shll $11, %ecx
+; X64-NEXT: movw %cx, (%rsi)
+; X64-NEXT: movl %eax, %ecx
+; X64-NEXT: shll $12, %ecx
+; X64-NEXT: movw %cx, (%rsi)
; X64-NEXT: shll $13, %eax
-; X64-NEXT: movswl %ax, %edi
-; X64-NEXT: shrl $3, %edi
-; X64-NEXT: cmpw %di, %cx
-; X64-NEXT: cmovnel %edx, %eax
; X64-NEXT: movw %ax, (%rsi)
; X64-NEXT: retq
%ashr = ashr i8 %x, 5
@@ -88,24 +70,9 @@ define void @computeNumSignBits_shl_zext_vec_1(<2 x i8> %x, ptr %p) nounwind {
; X64-NEXT: movdqa {{.*#+}} xmm1 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4]
; X64-NEXT: pxor %xmm1, %xmm0
; X64-NEXT: psubb %xmm1, %xmm0
-; X64-NEXT: pxor %xmm1, %xmm1
-; X64-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; X64-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1024,4096,u,u,u,u,u,u]
-; X64-NEXT: movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; X64-NEXT: pand %xmm0, %xmm2
-; X64-NEXT: pcmpgtw %xmm0, %xmm1
-; X64-NEXT: pandn {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; X64-NEXT: por %xmm2, %xmm1
-; X64-NEXT: movdqa %xmm0, %xmm2
-; X64-NEXT: paddw %xmm0, %xmm2
-; X64-NEXT: movdqa %xmm2, %xmm3
-; X64-NEXT: psraw $1, %xmm3
-; X64-NEXT: pcmpeqw %xmm0, %xmm3
-; X64-NEXT: movdqa %xmm3, %xmm0
-; X64-NEXT: pandn %xmm1, %xmm0
-; X64-NEXT: pand %xmm2, %xmm3
-; X64-NEXT: por %xmm0, %xmm3
-; X64-NEXT: movd %xmm3, (%rdi)
+; X64-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; X64-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2048,8192,u,u,u,u,u,u]
+; X64-NEXT: movd %xmm0, (%rdi)
; X64-NEXT: retq
%ashr = ashr <2 x i8> %x, <i8 5, i8 5>
%zext = zext <2 x i8> %ashr to <2 x i16>
From 62b7a90bd1bace956e41418bcfabfea1cbc4d2c2 Mon Sep 17 00:00:00 2001
From: Bjorn Pettersson <bjorn.a.pettersson at ericsson.com>
Date: Fri, 5 Jul 2024 16:51:08 +0200
Subject: [PATCH 3/4] Fixup
- Rename test case
- Use getValidShiftAmountRange
- Use ISD::isExtOpcode
- Improve variable names (Src/ExtendedOp -> Ext/Extendee)
---
.../lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 40 +++++++++----------
...msignbits-shl.ll => known-signbits-shl.ll} | 0
2 files changed, 19 insertions(+), 21 deletions(-)
rename llvm/test/CodeGen/X86/{computenumsignbits-shl.ll => known-signbits-shl.ll} (100%)
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 152dee9c2f78f..2a05f64666115 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -4615,30 +4615,28 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
Tmp = std::min<uint64_t>(Tmp + *ShAmt, VTBits);
return Tmp;
case ISD::SHL:
- if (std::optional<uint64_t> MaxShAmt =
- getValidMaximumShiftAmount(Op, DemandedElts, Depth + 1)) {
- if (Op.getOperand(0).getOpcode() == ISD::ANY_EXTEND ||
- Op.getOperand(0).getOpcode() == ISD::ZERO_EXTEND ||
- Op.getOperand(0).getOpcode() == ISD::SIGN_EXTEND)
- if (std::optional<uint64_t> MinShAmt =
- getValidMinimumShiftAmount(Op, DemandedElts, Depth + 1)) {
- SDValue Src = Op.getOperand(0);
- EVT SrcVT = Src.getValueType();
- SDValue ExtendedOp = Op.getOperand(0).getOperand(0);
- EVT ExtendedOpVT = ExtendedOp.getValueType();
- uint64_t ExtendedWidth =
- SrcVT.getScalarSizeInBits() - ExtendedOpVT.getScalarSizeInBits();
- if (ExtendedWidth <= *MinShAmt) {
- Tmp = ComputeNumSignBits(ExtendedOp, DemandedElts, Depth + 1);
- Tmp += ExtendedWidth;
- if (*MaxShAmt < Tmp)
- return Tmp - *MaxShAmt;
- }
+ if (std::optional<ConstantRange> ShAmtRange =
+ getValidShiftAmountRange(Op, DemandedElts, Depth + 1)) {
+ uint64_t MaxShAmt = ShAmtRange->getUnsignedMax().getZExtValue();
+ uint64_t MinShAmt = ShAmtRange->getUnsignedMin().getZExtValue();
+ if (ISD::isExtOpcode(Op.getOperand(0).getOpcode())) {
+ SDValue Ext = Op.getOperand(0);
+ EVT ExtVT = Ext.getValueType();
+ SDValue Extendee = Ext.getOperand(0);
+ EVT ExtendeeVT = Extendee.getValueType();
+ uint64_t SizeDifference =
+ ExtVT.getScalarSizeInBits() - ExtendeeVT.getScalarSizeInBits();
+ if (SizeDifference <= MinShAmt) {
+ Tmp = SizeDifference +
+ ComputeNumSignBits(Extendee, DemandedElts, Depth + 1);
+ if (MaxShAmt < Tmp)
+ return Tmp - MaxShAmt;
}
+ }
// shl destroys sign bits, ensure it doesn't shift out all sign bits.
Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
- if (*MaxShAmt < Tmp)
- return Tmp - *MaxShAmt;
+ if (MaxShAmt < Tmp)
+ return Tmp - MaxShAmt;
}
break;
case ISD::AND:
diff --git a/llvm/test/CodeGen/X86/computenumsignbits-shl.ll b/llvm/test/CodeGen/X86/known-signbits-shl.ll
similarity index 100%
rename from llvm/test/CodeGen/X86/computenumsignbits-shl.ll
rename to llvm/test/CodeGen/X86/known-signbits-shl.ll
From f6a8ca5bcbf914dc3e7764abff276c245fd8e95b Mon Sep 17 00:00:00 2001
From: Bjorn Pettersson <bjorn.a.pettersson at ericsson.com>
Date: Fri, 5 Jul 2024 20:03:42 +0200
Subject: [PATCH 4/4] Fixup: Add code comment
---
llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 2a05f64666115..2a17e6e9278b3 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -4619,6 +4619,11 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
getValidShiftAmountRange(Op, DemandedElts, Depth + 1)) {
uint64_t MaxShAmt = ShAmtRange->getUnsignedMax().getZExtValue();
uint64_t MinShAmt = ShAmtRange->getUnsignedMin().getZExtValue();
+ // Try to look through ZERO/SIGN/ANY_EXTEND. If all extended bits are
+ // shifted out, then we can compute the number of sign bits for the
+ // operand being extended. A future improvement could be to pass along the
+ // "shifted left by" information in the recursive calls to
+ // ComputeKnownSignBits, allowing us to handle this more generically.
if (ISD::isExtOpcode(Op.getOperand(0).getOpcode())) {
SDValue Ext = Op.getOperand(0);
EVT ExtVT = Ext.getValueType();