[llvm] ec11fbb - [RISCV] Use default promotion for (i32 (shl 1, X)) on RV64 when Zbs is enabled.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Thu Apr 28 09:59:13 PDT 2022


Author: Craig Topper
Date: 2022-04-28T09:58:30-07:00
New Revision: ec11fbb1d682e9c3e67eafc036c92fe9200b40f5

URL: https://github.com/llvm/llvm-project/commit/ec11fbb1d682e9c3e67eafc036c92fe9200b40f5
DIFF: https://github.com/llvm/llvm-project/commit/ec11fbb1d682e9c3e67eafc036c92fe9200b40f5.diff

LOG: [RISCV] Use default promotion for (i32 (shl 1, X)) on RV64 when Zbs is enabled.

This improves opportunities to use bset/bclr/binv. Unfortunately,
there are no W versions of these instructions so this isn't always
a clear win. If we use SLLW we get free sign extend and shift masking,
but need to put a 1 in a register and can't remove an or/xor. If
we use bset/bclr/binv we remove the immediate materialization and
logic op, but might need a mask on the shift amount and sext.w.

Reviewed By: luismarques

Differential Revision: https://reviews.llvm.org/D124096

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/test/CodeGen/RISCV/rv64zbs.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 2b3fa1685db4..5f555bb28fa8 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -6822,6 +6822,10 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
            "Unexpected custom legalisation");
     if (N->getOperand(1).getOpcode() != ISD::Constant) {
+      // If we can use a BSET instruction, allow default promotion to apply.
+      if (N->getOpcode() == ISD::SHL && Subtarget.hasStdExtZbs() &&
+          isOneConstant(N->getOperand(0)))
+        break;
       Results.push_back(customLegalizeToWOp(N, DAG));
       break;
     }

diff  --git a/llvm/test/CodeGen/RISCV/rv64zbs.ll b/llvm/test/CodeGen/RISCV/rv64zbs.ll
index edd9e96c2d74..1bc5dba763b8 100644
--- a/llvm/test/CodeGen/RISCV/rv64zbs.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbs.ll
@@ -15,10 +15,9 @@ define signext i32 @bclr_i32(i32 signext %a, i32 signext %b) nounwind {
 ;
 ; RV64ZBS-LABEL: bclr_i32:
 ; RV64ZBS:       # %bb.0:
-; RV64ZBS-NEXT:    li a2, 1
-; RV64ZBS-NEXT:    sllw a1, a2, a1
-; RV64ZBS-NEXT:    not a1, a1
-; RV64ZBS-NEXT:    and a0, a1, a0
+; RV64ZBS-NEXT:    andi a1, a1, 31
+; RV64ZBS-NEXT:    bclr a0, a0, a1
+; RV64ZBS-NEXT:    sext.w a0, a0
 ; RV64ZBS-NEXT:    ret
   %and = and i32 %b, 31
   %shl = shl nuw i32 1, %and
@@ -38,10 +37,8 @@ define signext i32 @bclr_i32_no_mask(i32 signext %a, i32 signext %b) nounwind {
 ;
 ; RV64ZBS-LABEL: bclr_i32_no_mask:
 ; RV64ZBS:       # %bb.0:
-; RV64ZBS-NEXT:    li a2, 1
-; RV64ZBS-NEXT:    sllw a1, a2, a1
-; RV64ZBS-NEXT:    not a1, a1
-; RV64ZBS-NEXT:    and a0, a1, a0
+; RV64ZBS-NEXT:    bclr a0, a0, a1
+; RV64ZBS-NEXT:    sext.w a0, a0
 ; RV64ZBS-NEXT:    ret
   %shl = shl i32 1, %b
   %neg = xor i32 %shl, -1
@@ -62,10 +59,8 @@ define signext i32 @bclr_i32_load(i32* %p, i32 signext %b) nounwind {
 ; RV64ZBS-LABEL: bclr_i32_load:
 ; RV64ZBS:       # %bb.0:
 ; RV64ZBS-NEXT:    lw a0, 0(a0)
-; RV64ZBS-NEXT:    li a2, 1
-; RV64ZBS-NEXT:    sllw a1, a2, a1
-; RV64ZBS-NEXT:    not a1, a1
-; RV64ZBS-NEXT:    and a0, a1, a0
+; RV64ZBS-NEXT:    bclr a0, a0, a1
+; RV64ZBS-NEXT:    sext.w a0, a0
 ; RV64ZBS-NEXT:    ret
   %a = load i32, i32* %p
   %shl = shl i32 1, %b
@@ -123,9 +118,9 @@ define signext i32 @bset_i32(i32 signext %a, i32 signext %b) nounwind {
 ;
 ; RV64ZBS-LABEL: bset_i32:
 ; RV64ZBS:       # %bb.0:
-; RV64ZBS-NEXT:    li a2, 1
-; RV64ZBS-NEXT:    sllw a1, a2, a1
-; RV64ZBS-NEXT:    or a0, a1, a0
+; RV64ZBS-NEXT:    andi a1, a1, 31
+; RV64ZBS-NEXT:    bset a0, a0, a1
+; RV64ZBS-NEXT:    sext.w a0, a0
 ; RV64ZBS-NEXT:    ret
   %and = and i32 %b, 31
   %shl = shl nuw i32 1, %and
@@ -143,9 +138,8 @@ define signext i32 @bset_i32_no_mask(i32 signext %a, i32 signext %b) nounwind {
 ;
 ; RV64ZBS-LABEL: bset_i32_no_mask:
 ; RV64ZBS:       # %bb.0:
-; RV64ZBS-NEXT:    li a2, 1
-; RV64ZBS-NEXT:    sllw a1, a2, a1
-; RV64ZBS-NEXT:    or a0, a1, a0
+; RV64ZBS-NEXT:    bset a0, a0, a1
+; RV64ZBS-NEXT:    sext.w a0, a0
 ; RV64ZBS-NEXT:    ret
   %shl = shl i32 1, %b
   %or = or i32 %shl, %a
@@ -164,9 +158,8 @@ define signext i32 @bset_i32_load(i32* %p, i32 signext %b) nounwind {
 ; RV64ZBS-LABEL: bset_i32_load:
 ; RV64ZBS:       # %bb.0:
 ; RV64ZBS-NEXT:    lw a0, 0(a0)
-; RV64ZBS-NEXT:    li a2, 1
-; RV64ZBS-NEXT:    sllw a1, a2, a1
-; RV64ZBS-NEXT:    or a0, a1, a0
+; RV64ZBS-NEXT:    bset a0, a0, a1
+; RV64ZBS-NEXT:    sext.w a0, a0
 ; RV64ZBS-NEXT:    ret
   %a = load i32, i32* %p
   %shl = shl i32 1, %b
@@ -184,8 +177,8 @@ define signext i32 @bset_i32_zero(i32 signext %a) nounwind {
 ;
 ; RV64ZBS-LABEL: bset_i32_zero:
 ; RV64ZBS:       # %bb.0:
-; RV64ZBS-NEXT:    li a1, 1
-; RV64ZBS-NEXT:    sllw a0, a1, a0
+; RV64ZBS-NEXT:    bset a0, zero, a0
+; RV64ZBS-NEXT:    sext.w a0, a0
 ; RV64ZBS-NEXT:    ret
   %shl = shl i32 1, %a
   ret i32 %shl
@@ -252,9 +245,9 @@ define signext i32 @binv_i32(i32 signext %a, i32 signext %b) nounwind {
 ;
 ; RV64ZBS-LABEL: binv_i32:
 ; RV64ZBS:       # %bb.0:
-; RV64ZBS-NEXT:    li a2, 1
-; RV64ZBS-NEXT:    sllw a1, a2, a1
-; RV64ZBS-NEXT:    xor a0, a1, a0
+; RV64ZBS-NEXT:    andi a1, a1, 31
+; RV64ZBS-NEXT:    binv a0, a0, a1
+; RV64ZBS-NEXT:    sext.w a0, a0
 ; RV64ZBS-NEXT:    ret
   %and = and i32 %b, 31
   %shl = shl nuw i32 1, %and
@@ -272,9 +265,8 @@ define signext i32 @binv_i32_no_mask(i32 signext %a, i32 signext %b) nounwind {
 ;
 ; RV64ZBS-LABEL: binv_i32_no_mask:
 ; RV64ZBS:       # %bb.0:
-; RV64ZBS-NEXT:    li a2, 1
-; RV64ZBS-NEXT:    sllw a1, a2, a1
-; RV64ZBS-NEXT:    xor a0, a1, a0
+; RV64ZBS-NEXT:    binv a0, a0, a1
+; RV64ZBS-NEXT:    sext.w a0, a0
 ; RV64ZBS-NEXT:    ret
   %shl = shl i32 1, %b
   %xor = xor i32 %shl, %a
@@ -293,9 +285,8 @@ define signext i32 @binv_i32_load(i32* %p, i32 signext %b) nounwind {
 ; RV64ZBS-LABEL: binv_i32_load:
 ; RV64ZBS:       # %bb.0:
 ; RV64ZBS-NEXT:    lw a0, 0(a0)
-; RV64ZBS-NEXT:    li a2, 1
-; RV64ZBS-NEXT:    sllw a1, a2, a1
-; RV64ZBS-NEXT:    xor a0, a1, a0
+; RV64ZBS-NEXT:    binv a0, a0, a1
+; RV64ZBS-NEXT:    sext.w a0, a0
 ; RV64ZBS-NEXT:    ret
   %a = load i32, i32* %p
   %shl = shl i32 1, %b


        


More information about the llvm-commits mailing list