[llvm] [RISCV] Add isel special case for (and (shl X, c2), c1) -> (slli_uw (srli x, c4-c2), c4). (PR #91638)

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Thu May 9 13:58:11 PDT 2024


https://github.com/topperc updated https://github.com/llvm/llvm-project/pull/91638

>From 292763d3eb0930ca029f7a5690fff16b66e19bc2 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Thu, 9 May 2024 11:14:07 -0700
Subject: [PATCH 1/4] [RISCV] Add isel special case for (and (shl X, c2), c1)
 -> (slli_uw (srli x, c4-c2), c4).

Where c1 is a shifted mask with 32 set bits and c4 trailing zeros.

This is an alternative to #91626.
---
 llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp | 19 +++++++++++++--
 llvm/test/CodeGen/RISCV/rv64zba.ll          | 27 ++++++++++++++++++---
 2 files changed, 40 insertions(+), 6 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index e73a3af92af6f..6fd16210aade9 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -1322,11 +1322,11 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
         }
       }
 
-      // Turn (and (shl x, c2), c1) -> (srli (slli c2+c3), c3) if c1 is a mask
-      // shifted by c2 bits with c3 leading zeros.
       if (LeftShift && isShiftedMask_64(C1)) {
         unsigned Leading = XLen - llvm::bit_width(C1);
 
+        // Turn (and (shl x, c2), c1) -> (srli (slli c2+c3), c3) if c1 is a mask
+        // shifted by c2 bits with c3 leading zeros.
         if (C2 + Leading < XLen &&
             C1 == (maskTrailingOnes<uint64_t>(XLen - (C2 + Leading)) << C2)) {
           // Use slli.uw when possible.
@@ -1350,6 +1350,21 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
             return;
           }
         }
+
+        // Turn (and (shl x, c2), c1) -> (slli_uw (srli x, c4-c2), c4) where c1
+        // is shifted mask with 32 set bits and c4 trailing zeros.
+        unsigned Trailing = llvm::countr_zero(C1);
+        if (Leading + Trailing == 32 && C2 < Trailing &&
+            Subtarget->hasStdExtZba() && OneUseOrZExtW) {
+          SDNode *SRLI = CurDAG->getMachineNode(
+              RISCV::SRLI, DL, VT, X,
+              CurDAG->getTargetConstant(Trailing - C2, DL, VT));
+          SDNode *SLLI_UW = CurDAG->getMachineNode(
+              RISCV::SLLI_UW, DL, VT, SDValue(SRLI, 0),
+              CurDAG->getTargetConstant(Trailing, DL, VT));
+          ReplaceNode(Node, SLLI_UW);
+          return;
+        }
       }
 
       // Turn (and (shr x, c2), c1) -> (slli (srli x, c2+c3), c3) if c1 is a
diff --git a/llvm/test/CodeGen/RISCV/rv64zba.ll b/llvm/test/CodeGen/RISCV/rv64zba.ll
index 8fe221f2a297a..867775452e0c0 100644
--- a/llvm/test/CodeGen/RISCV/rv64zba.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zba.ll
@@ -2866,8 +2866,7 @@ define ptr @gep_lshr_i32(ptr %0, i64 %1) {
 ;
 ; RV64ZBA-LABEL: gep_lshr_i32:
 ; RV64ZBA:       # %bb.0: # %entry
-; RV64ZBA-NEXT:    slli a1, a1, 2
-; RV64ZBA-NEXT:    srli a1, a1, 4
+; RV64ZBA-NEXT:    srli a1, a1, 2
 ; RV64ZBA-NEXT:    slli.uw a1, a1, 4
 ; RV64ZBA-NEXT:    sh2add a1, a1, a1
 ; RV64ZBA-NEXT:    add a0, a0, a1
@@ -2891,8 +2890,7 @@ define i64 @srli_slliw(i64 %1) {
 ;
 ; RV64ZBA-LABEL: srli_slliw:
 ; RV64ZBA:       # %bb.0: # %entry
-; RV64ZBA-NEXT:    slli a0, a0, 2
-; RV64ZBA-NEXT:    srli a0, a0, 4
+; RV64ZBA-NEXT:    srli a0, a0, 2
 ; RV64ZBA-NEXT:    slli.uw a0, a0, 4
 ; RV64ZBA-NEXT:    ret
 entry:
@@ -2902,6 +2900,27 @@ entry:
   ret i64 %4
 }
 
+define i64 @srli_slliw_canonical(i64 %0) {
+; RV64I-LABEL: srli_slliw_canonical:
+; RV64I:       # %bb.0: # %entry
+; RV64I-NEXT:    slli a0, a0, 2
+; RV64I-NEXT:    li a1, 1
+; RV64I-NEXT:    slli a1, a1, 36
+; RV64I-NEXT:    addi a1, a1, -16
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: srli_slliw_canonical:
+; RV64ZBA:       # %bb.0: # %entry
+; RV64ZBA-NEXT:    srli a0, a0, 2
+; RV64ZBA-NEXT:    slli.uw a0, a0, 4
+; RV64ZBA-NEXT:    ret
+entry:
+  %1 = shl i64 %0, 2
+  %2 = and i64 %1, 68719476720
+  ret i64 %2
+}
+
 define i64 @srli_slli_i16(i64 %1) {
 ; CHECK-LABEL: srli_slli_i16:
 ; CHECK:       # %bb.0: # %entry

>From fc3c0b128041b74b5e204a7b43035bf34e7e9ee8 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Thu, 9 May 2024 13:08:19 -0700
Subject: [PATCH 2/4] fixup! ensure Leading is greater than 0.

---
 llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 6fd16210aade9..f739b2dc778c7 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -1354,7 +1354,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
         // Turn (and (shl x, c2), c1) -> (slli_uw (srli x, c4-c2), c4) where c1
         // is shifted mask with 32 set bits and c4 trailing zeros.
         unsigned Trailing = llvm::countr_zero(C1);
-        if (Leading + Trailing == 32 && C2 < Trailing &&
+        if (Leading > 0 && Leading + Trailing == 32 && C2 < Trailing &&
             Subtarget->hasStdExtZba() && OneUseOrZExtW) {
           SDNode *SRLI = CurDAG->getMachineNode(
               RISCV::SRLI, DL, VT, X,

>From 0880607b854c8418ea362a66f6e514c369f52b6b Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Thu, 9 May 2024 13:55:00 -0700
Subject: [PATCH 3/4] fixup! Move code to a slightly different place with
 similar code.

---
 llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp | 28 ++++++++++-----------
 1 file changed, 13 insertions(+), 15 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index f739b2dc778c7..34d162852c9d7 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -1350,21 +1350,6 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
             return;
           }
         }
-
-        // Turn (and (shl x, c2), c1) -> (slli_uw (srli x, c4-c2), c4) where c1
-        // is shifted mask with 32 set bits and c4 trailing zeros.
-        unsigned Trailing = llvm::countr_zero(C1);
-        if (Leading > 0 && Leading + Trailing == 32 && C2 < Trailing &&
-            Subtarget->hasStdExtZba() && OneUseOrZExtW) {
-          SDNode *SRLI = CurDAG->getMachineNode(
-              RISCV::SRLI, DL, VT, X,
-              CurDAG->getTargetConstant(Trailing - C2, DL, VT));
-          SDNode *SLLI_UW = CurDAG->getMachineNode(
-              RISCV::SLLI_UW, DL, VT, SDValue(SRLI, 0),
-              CurDAG->getTargetConstant(Trailing, DL, VT));
-          ReplaceNode(Node, SLLI_UW);
-          return;
-        }
       }
 
       // Turn (and (shr x, c2), c1) -> (slli (srli x, c2+c3), c3) if c1 is a
@@ -1431,6 +1416,19 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
           ReplaceNode(Node, SLLI);
           return;
         }
+
+        // If we have 32 bits in the mask, we can use SLLI_UW instead of SLLI.
+        if (C2 < Trailing && Leading + Trailing == 32 && OneUseOrZExtW &&
+            Subtarget->hasStdExtZba()) {
+          SDNode *SRLI = CurDAG->getMachineNode(
+              RISCV::SRLI, DL, VT, X,
+              CurDAG->getTargetConstant(Trailing - C2, DL, VT));
+          SDNode *SLLI_UW = CurDAG->getMachineNode(
+              RISCV::SLLI_UW, DL, VT, SDValue(SRLI, 0),
+              CurDAG->getTargetConstant(Trailing, DL, VT));
+          ReplaceNode(Node, SLLI_UW);
+          return;
+        }
       }
     }
 

>From bc98e86bfad57b1b3a5dd6f8b5f96b2ec415a3d4 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Thu, 9 May 2024 13:57:01 -0700
Subject: [PATCH 4/4] fixup! Add negative test that was previously broken.

---
 llvm/test/CodeGen/RISCV/rv64zba.ll | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/llvm/test/CodeGen/RISCV/rv64zba.ll b/llvm/test/CodeGen/RISCV/rv64zba.ll
index 867775452e0c0..dc93c0215a25c 100644
--- a/llvm/test/CodeGen/RISCV/rv64zba.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zba.ll
@@ -2921,6 +2921,19 @@ entry:
   ret i64 %2
 }
 
+; Make sure we don't accidentally use slli.uw with a shift of 32.
+define i64 @srli_slliuw_negative_test(i64 %0) {
+; CHECK-LABEL: srli_slliuw_negative_test:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    srli a0, a0, 6
+; CHECK-NEXT:    slli a0, a0, 32
+; CHECK-NEXT:    ret
+entry:
+  %1 = lshr i64 %0, 6
+  %2 = shl i64 %1, 32
+  ret i64 %2
+}
+
 define i64 @srli_slli_i16(i64 %1) {
 ; CHECK-LABEL: srli_slli_i16:
 ; CHECK:       # %bb.0: # %entry



More information about the llvm-commits mailing list