[llvm] ae60706 - [DAG] SimplifyDemandedBits - call ComputeKnownBits for constant non-uniform ISD::SRL shift amounts

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Fri Jul 21 06:53:09 PDT 2023


Author: Simon Pilgrim
Date: 2023-07-21T14:52:57+01:00
New Revision: ae60706da07a128e318ebc383d682e3861337c68

URL: https://github.com/llvm/llvm-project/commit/ae60706da07a128e318ebc383d682e3861337c68
DIFF: https://github.com/llvm/llvm-project/commit/ae60706da07a128e318ebc383d682e3861337c68.diff

LOG: [DAG] SimplifyDemandedBits - call ComputeKnownBits for constant non-uniform ISD::SRL shift amounts

We only attempted to determine KnownBits for uniform constant shift amounts, but ComputeKnownBits can also handle some non-uniform cases, so we can use it as a fallback.
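
For example, in the combine_vec_lshr_known_zero1 test updated below, each lane is masked to at most four set bits before being shifted right by 8, 9, 10 or 11, so the known-bits fallback can now prove every demanded bit zero and the whole shift sequence folds away:

  %1 = and <4 x i32> %x, <i32 15, i32 15, i32 15, i32 15>
  %2 = lshr <4 x i32> %1, <i32 8, i32 9, i32 10, i32 11>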

Added: 
    

Modified: 
    llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
    llvm/test/CodeGen/LoongArch/rotl-rotr.ll
    llvm/test/CodeGen/RISCV/rotl-rotr.ll
    llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll
    llvm/test/CodeGen/X86/combine-srl.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 65ca0e0a22b3b9..a84d35a6ea4e9a 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -1908,6 +1908,10 @@ bool TargetLowering::SimplifyDemandedBits(
           return TLO.CombineTo(Op, NewOp);
         }
       }
+    } else {
+      // Use generic knownbits computation as it has support for non-uniform
+      // shift amounts.
+      Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth);
     }
     break;
   }

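To illustrate the kind of reasoning the fallback relies on, here is a minimal standalone sketch (not part of this patch; it only assumes LLVM's ADT/Support headers and a recent KnownBits API). It shows per-lane known-bits for the non-uniform constant lshr from combine_vec_lshr_known_zero1 proving each lane zero:

  // Minimal sketch (not the patch itself): per-lane known-bits for a
  // non-uniform constant lshr, mirroring combine_vec_lshr_known_zero1.
  #include "llvm/ADT/APInt.h"
  #include "llvm/Support/KnownBits.h"
  #include <cassert>

  using namespace llvm;

  int main() {
    // Each lane of (x & 15) has only bits 0..3 possibly set.
    KnownBits Lhs(32);
    Lhs.Zero.setBitsFrom(4);

    // Shift amounts 8..11 clear all of those bits, so every lane is known zero.
    const unsigned ShAmts[] = {8, 9, 10, 11};
    for (unsigned ShAmt : ShAmts) {
      KnownBits Lane =
          KnownBits::lshr(Lhs, KnownBits::makeConstant(APInt(32, ShAmt)));
      assert(Lane.Zero.isAllOnes() && "lane folds to zero");
      (void)Lane;
    }
    return 0;
  }

This per-lane result matches the zeroing codegen (vxorps / li a0, 0 / move $a0, $zero) in the updated tests below.
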
diff --git a/llvm/test/CodeGen/LoongArch/rotl-rotr.ll b/llvm/test/CodeGen/LoongArch/rotl-rotr.ll
index 2162d4872db527..b9a6ebdcdd22cc 100644
--- a/llvm/test/CodeGen/LoongArch/rotl-rotr.ll
+++ b/llvm/test/CodeGen/LoongArch/rotl-rotr.ll
@@ -374,8 +374,7 @@ define i64 @rotl_64_mask_and_127_and_63(i64 %x, i64 %y) nounwind {
 define i64 @rotl_64_mask_or_128_or_64(i64 %x, i64 %y) nounwind {
 ; LA32-LABEL: rotl_64_mask_or_128_or_64:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    sub.w $a0, $zero, $a2
-; LA32-NEXT:    srl.w $a0, $a1, $a0
+; LA32-NEXT:    move $a0, $zero
 ; LA32-NEXT:    move $a1, $zero
 ; LA32-NEXT:    ret
 ;
@@ -498,7 +497,7 @@ define i64 @rotr_64_mask_and_127_and_63(i64 %x, i64 %y) nounwind {
 define i64 @rotr_64_mask_or_128_or_64(i64 %x, i64 %y) nounwind {
 ; LA32-LABEL: rotr_64_mask_or_128_or_64:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    srl.w $a0, $a1, $a2
+; LA32-NEXT:    move $a0, $zero
 ; LA32-NEXT:    move $a1, $zero
 ; LA32-NEXT:    ret
 ;

diff --git a/llvm/test/CodeGen/RISCV/rotl-rotr.ll b/llvm/test/CodeGen/RISCV/rotl-rotr.ll
index 160d62ef0818d1..d907a37c2b3d17 100644
--- a/llvm/test/CodeGen/RISCV/rotl-rotr.ll
+++ b/llvm/test/CodeGen/RISCV/rotl-rotr.ll
@@ -516,15 +516,12 @@ define i32 @rotl_32_mask_and_63_and_31(i32 %x, i32 %y) nounwind {
 define i32 @rotl_32_mask_or_64_or_32(i32 %x, i32 %y) nounwind {
 ; RV32I-LABEL: rotl_32_mask_or_64_or_32:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    neg a1, a1
-; RV32I-NEXT:    ori a1, a1, 32
-; RV32I-NEXT:    srl a0, a0, a1
+; RV32I-NEXT:    li a0, 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: rotl_32_mask_or_64_or_32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    negw a1, a1
-; RV64I-NEXT:    srlw a0, a0, a1
+; RV64I-NEXT:    li a0, 0
 ; RV64I-NEXT:    ret
 ;
 ; RV32ZBB-LABEL: rotl_32_mask_or_64_or_32:
@@ -665,13 +662,12 @@ define i32 @rotr_32_mask_and_63_and_31(i32 %x, i32 %y) nounwind {
 define i32 @rotr_32_mask_or_64_or_32(i32 %x, i32 %y) nounwind {
 ; RV32I-LABEL: rotr_32_mask_or_64_or_32:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    ori a1, a1, 64
-; RV32I-NEXT:    srl a0, a0, a1
+; RV32I-NEXT:    li a0, 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: rotr_32_mask_or_64_or_32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    srlw a0, a0, a1
+; RV64I-NEXT:    li a0, 0
 ; RV64I-NEXT:    ret
 ;
 ; RV32ZBB-LABEL: rotr_32_mask_or_64_or_32:
@@ -1001,22 +997,18 @@ define i64 @rotl_64_mask_and_127_and_63(i64 %x, i64 %y) nounwind {
 define i64 @rotl_64_mask_or_128_or_64(i64 %x, i64 %y) nounwind {
 ; RV32I-LABEL: rotl_64_mask_or_128_or_64:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    neg a0, a2
-; RV32I-NEXT:    srl a0, a1, a0
+; RV32I-NEXT:    li a0, 0
 ; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: rotl_64_mask_or_128_or_64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    negw a1, a1
-; RV64I-NEXT:    ori a1, a1, 64
-; RV64I-NEXT:    srl a0, a0, a1
+; RV64I-NEXT:    li a0, 0
 ; RV64I-NEXT:    ret
 ;
 ; RV32ZBB-LABEL: rotl_64_mask_or_128_or_64:
 ; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    neg a0, a2
-; RV32ZBB-NEXT:    srl a0, a1, a0
+; RV32ZBB-NEXT:    li a0, 0
 ; RV32ZBB-NEXT:    li a1, 0
 ; RV32ZBB-NEXT:    ret
 ;
@@ -1027,8 +1019,7 @@ define i64 @rotl_64_mask_or_128_or_64(i64 %x, i64 %y) nounwind {
 ;
 ; RV32XTHEADBB-LABEL: rotl_64_mask_or_128_or_64:
 ; RV32XTHEADBB:       # %bb.0:
-; RV32XTHEADBB-NEXT:    neg a0, a2
-; RV32XTHEADBB-NEXT:    srl a0, a1, a0
+; RV32XTHEADBB-NEXT:    li a0, 0
 ; RV32XTHEADBB-NEXT:    li a1, 0
 ; RV32XTHEADBB-NEXT:    ret
 ;
@@ -1341,19 +1332,18 @@ define i64 @rotr_64_mask_and_127_and_63(i64 %x, i64 %y) nounwind {
 define i64 @rotr_64_mask_or_128_or_64(i64 %x, i64 %y) nounwind {
 ; RV32I-LABEL: rotr_64_mask_or_128_or_64:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    srl a0, a1, a2
+; RV32I-NEXT:    li a0, 0
 ; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: rotr_64_mask_or_128_or_64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    ori a1, a1, 128
-; RV64I-NEXT:    srl a0, a0, a1
+; RV64I-NEXT:    li a0, 0
 ; RV64I-NEXT:    ret
 ;
 ; RV32ZBB-LABEL: rotr_64_mask_or_128_or_64:
 ; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    srl a0, a1, a2
+; RV32ZBB-NEXT:    li a0, 0
 ; RV32ZBB-NEXT:    li a1, 0
 ; RV32ZBB-NEXT:    ret
 ;
@@ -1364,7 +1354,7 @@ define i64 @rotr_64_mask_or_128_or_64(i64 %x, i64 %y) nounwind {
 ;
 ; RV32XTHEADBB-LABEL: rotr_64_mask_or_128_or_64:
 ; RV32XTHEADBB:       # %bb.0:
-; RV32XTHEADBB-NEXT:    srl a0, a1, a2
+; RV32XTHEADBB-NEXT:    li a0, 0
 ; RV32XTHEADBB-NEXT:    li a1, 0
 ; RV32XTHEADBB-NEXT:    ret
 ;

diff --git a/llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll
index 5ed6aba4441505..5598935bb51107 100644
--- a/llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll
@@ -8,11 +8,10 @@ define <vscale x 1 x i8> @bitreverse_nxv1i8(<vscale x 1 x i8> %va) {
 ; CHECK-LABEL: bitreverse_nxv1i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
-; CHECK-NEXT:    vand.vi v9, v8, 15
-; CHECK-NEXT:    vsll.vi v9, v9, 4
-; CHECK-NEXT:    vsrl.vi v8, v8, 4
+; CHECK-NEXT:    vsrl.vi v9, v8, 4
 ; CHECK-NEXT:    vand.vi v8, v8, 15
-; CHECK-NEXT:    vor.vv v8, v8, v9
+; CHECK-NEXT:    vsll.vi v8, v8, 4
+; CHECK-NEXT:    vor.vv v8, v9, v8
 ; CHECK-NEXT:    vsrl.vi v9, v8, 2
 ; CHECK-NEXT:    li a0, 51
 ; CHECK-NEXT:    vand.vx v9, v9, a0
@@ -41,11 +40,10 @@ define <vscale x 2 x i8> @bitreverse_nxv2i8(<vscale x 2 x i8> %va) {
 ; CHECK-LABEL: bitreverse_nxv2i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
-; CHECK-NEXT:    vand.vi v9, v8, 15
-; CHECK-NEXT:    vsll.vi v9, v9, 4
-; CHECK-NEXT:    vsrl.vi v8, v8, 4
+; CHECK-NEXT:    vsrl.vi v9, v8, 4
 ; CHECK-NEXT:    vand.vi v8, v8, 15
-; CHECK-NEXT:    vor.vv v8, v8, v9
+; CHECK-NEXT:    vsll.vi v8, v8, 4
+; CHECK-NEXT:    vor.vv v8, v9, v8
 ; CHECK-NEXT:    vsrl.vi v9, v8, 2
 ; CHECK-NEXT:    li a0, 51
 ; CHECK-NEXT:    vand.vx v9, v9, a0
@@ -74,11 +72,10 @@ define <vscale x 4 x i8> @bitreverse_nxv4i8(<vscale x 4 x i8> %va) {
 ; CHECK-LABEL: bitreverse_nxv4i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
-; CHECK-NEXT:    vand.vi v9, v8, 15
-; CHECK-NEXT:    vsll.vi v9, v9, 4
-; CHECK-NEXT:    vsrl.vi v8, v8, 4
+; CHECK-NEXT:    vsrl.vi v9, v8, 4
 ; CHECK-NEXT:    vand.vi v8, v8, 15
-; CHECK-NEXT:    vor.vv v8, v8, v9
+; CHECK-NEXT:    vsll.vi v8, v8, 4
+; CHECK-NEXT:    vor.vv v8, v9, v8
 ; CHECK-NEXT:    vsrl.vi v9, v8, 2
 ; CHECK-NEXT:    li a0, 51
 ; CHECK-NEXT:    vand.vx v9, v9, a0
@@ -107,11 +104,10 @@ define <vscale x 8 x i8> @bitreverse_nxv8i8(<vscale x 8 x i8> %va) {
 ; CHECK-LABEL: bitreverse_nxv8i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
-; CHECK-NEXT:    vand.vi v9, v8, 15
-; CHECK-NEXT:    vsll.vi v9, v9, 4
-; CHECK-NEXT:    vsrl.vi v8, v8, 4
+; CHECK-NEXT:    vsrl.vi v9, v8, 4
 ; CHECK-NEXT:    vand.vi v8, v8, 15
-; CHECK-NEXT:    vor.vv v8, v8, v9
+; CHECK-NEXT:    vsll.vi v8, v8, 4
+; CHECK-NEXT:    vor.vv v8, v9, v8
 ; CHECK-NEXT:    vsrl.vi v9, v8, 2
 ; CHECK-NEXT:    li a0, 51
 ; CHECK-NEXT:    vand.vx v9, v9, a0
@@ -140,11 +136,10 @@ define <vscale x 16 x i8> @bitreverse_nxv16i8(<vscale x 16 x i8> %va) {
 ; CHECK-LABEL: bitreverse_nxv16i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT:    vand.vi v10, v8, 15
-; CHECK-NEXT:    vsll.vi v10, v10, 4
-; CHECK-NEXT:    vsrl.vi v8, v8, 4
+; CHECK-NEXT:    vsrl.vi v10, v8, 4
 ; CHECK-NEXT:    vand.vi v8, v8, 15
-; CHECK-NEXT:    vor.vv v8, v8, v10
+; CHECK-NEXT:    vsll.vi v8, v8, 4
+; CHECK-NEXT:    vor.vv v8, v10, v8
 ; CHECK-NEXT:    vsrl.vi v10, v8, 2
 ; CHECK-NEXT:    li a0, 51
 ; CHECK-NEXT:    vand.vx v10, v10, a0
@@ -173,11 +168,10 @@ define <vscale x 32 x i8> @bitreverse_nxv32i8(<vscale x 32 x i8> %va) {
 ; CHECK-LABEL: bitreverse_nxv32i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
-; CHECK-NEXT:    vand.vi v12, v8, 15
-; CHECK-NEXT:    vsll.vi v12, v12, 4
-; CHECK-NEXT:    vsrl.vi v8, v8, 4
+; CHECK-NEXT:    vsrl.vi v12, v8, 4
 ; CHECK-NEXT:    vand.vi v8, v8, 15
-; CHECK-NEXT:    vor.vv v8, v8, v12
+; CHECK-NEXT:    vsll.vi v8, v8, 4
+; CHECK-NEXT:    vor.vv v8, v12, v8
 ; CHECK-NEXT:    vsrl.vi v12, v8, 2
 ; CHECK-NEXT:    li a0, 51
 ; CHECK-NEXT:    vand.vx v12, v12, a0
@@ -206,11 +200,10 @@ define <vscale x 64 x i8> @bitreverse_nxv64i8(<vscale x 64 x i8> %va) {
 ; CHECK-LABEL: bitreverse_nxv64i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
-; CHECK-NEXT:    vand.vi v16, v8, 15
-; CHECK-NEXT:    vsll.vi v16, v16, 4
-; CHECK-NEXT:    vsrl.vi v8, v8, 4
+; CHECK-NEXT:    vsrl.vi v16, v8, 4
 ; CHECK-NEXT:    vand.vi v8, v8, 15
-; CHECK-NEXT:    vor.vv v8, v8, v16
+; CHECK-NEXT:    vsll.vi v8, v8, 4
+; CHECK-NEXT:    vor.vv v8, v16, v8
 ; CHECK-NEXT:    vsrl.vi v16, v8, 2
 ; CHECK-NEXT:    li a0, 51
 ; CHECK-NEXT:    vand.vx v16, v16, a0

diff --git a/llvm/test/CodeGen/X86/combine-srl.ll b/llvm/test/CodeGen/X86/combine-srl.ll
index 5c69fe90559719..380444c62d9f1f 100644
--- a/llvm/test/CodeGen/X86/combine-srl.ll
+++ b/llvm/test/CodeGen/X86/combine-srl.ll
@@ -77,9 +77,7 @@ define <4 x i32> @combine_vec_lshr_known_zero1(<4 x i32> %x) {
 ;
 ; AVX-LABEL: combine_vec_lshr_known_zero1:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [15,15,15,15]
-; AVX-NEXT:    vpand %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = and <4 x i32> %x, <i32 15, i32 15, i32 15, i32 15>
   %2 = lshr <4 x i32> %1, <i32 8, i32 9, i32 10, i32 11>
@@ -260,11 +258,7 @@ define <4 x i32> @combine_vec_lshr_trunc_lshr_zero1(<4 x i64> %x) {
 ;
 ; AVX-LABEL: combine_vec_lshr_trunc_lshr_zero1:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vpsrlvq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; AVX-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; AVX-NEXT:    vzeroupper
+; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = lshr <4 x i64> %x, <i64 48, i64 49, i64 50, i64 51>
   %2 = trunc <4 x i64> %1 to <4 x i32>


        

