[llvm] a54f334 - [RISCV] Add shift amount operands of shift, rotate, and Zbs instructions to hasAllNBitUsers.

Craig Topper via llvm-commits <llvm-commits at lists.llvm.org>
Mon Oct 24 22:17:11 PDT 2022


Author: Craig Topper
Date: 2022-10-24T22:07:22-07:00
New Revision: a54f3347e8b39cb606dc1b722da1d0f171ab2fcd

URL: https://github.com/llvm/llvm-project/commit/a54f3347e8b39cb606dc1b722da1d0f171ab2fcd
DIFF: https://github.com/llvm/llvm-project/commit/a54f3347e8b39cb606dc1b722da1d0f171ab2fcd.diff

LOG: [RISCV] Add shift amount operands of shift, rotate, and Zbs instructions to hasAllNBitUsers.
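
For context: RISCVDAGToDAGISel::hasAllNBitUsers(Node, Bits) returns true only if every user of Node reads just its low Bits bits. On RV64, SLL/SRL/SRA, the rotates ROL/ROR, and the Zbs single-bit instructions BSET/BCLR/BINV read only the low log2(XLen) = 6 bits of their shift-amount/index operand, so a use as operand 1 of those instructions is only a 6-bit use. A minimal standalone illustration of that masking, assuming RV64 (rv64_sll is a hypothetical helper modeling the hardware behavior, not LLVM code):

    #include <cassert>
    #include <cstdint>

    // RV64 SLL ignores all but the low log2(XLen) = 6 bits of rs2, so the
    // producer of a shift amount only has to be correct in those 6 bits.
    static uint64_t rv64_sll(uint64_t rs1, uint64_t rs2) {
      return rs1 << (rs2 & 63);
    }

    int main() {
      uint64_t x = 0x1234;
      // Two amounts that agree only in bits [5:0] give the same shift result.
      assert(rv64_sll(x, 3) == rv64_sll(x, 0xFFFFFFFFFFFFFFC3ULL));
      return 0;
    }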

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
    llvm/test/CodeGen/RISCV/fpenv.ll
    llvm/test/CodeGen/RISCV/rotl-rotr.ll
    llvm/test/CodeGen/RISCV/rv64zbb-zbkb.ll
    llvm/test/CodeGen/RISCV/shift-masked-shamt.ll
    llvm/test/CodeGen/RISCV/shifts.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 9144f57beffd..0c422d1f9949 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -2295,6 +2295,18 @@ bool RISCVDAGToDAGISel::hasAllNBitUsers(SDNode *Node, unsigned Bits) const {
       if (Bits < 32)
         return false;
       break;
+    case RISCV::SLL:
+    case RISCV::SRA:
+    case RISCV::SRL:
+    case RISCV::ROL:
+    case RISCV::ROR:
+    case RISCV::BSET:
+    case RISCV::BCLR:
+    case RISCV::BINV:
+      // Shift amount operands only use log2(Xlen) bits.
+      if (UI.getOperandNo() != 1 || Bits < Log2_32(Subtarget->getXLen()))
+        return false;
+      break;
     case RISCV::SLLI:
       // SLLI only uses the lower (XLen - ShAmt) bits.
       if (Bits < Subtarget->getXLen() - User->getConstantOperandVal(1))
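
The test updates below follow from this change: once a use as a shift-amount operand counts as a 6-bit use, the instruction producing that amount no longer needs a correct full 64-bit result, which is what allows neg -> negw and slli -> slliw in the checks. The W forms agree with the non-W forms in the low 32 bits here (and therefore in the 6 bits the shift reads), and the backend generally prefers them because their results are known to be sign-extended. A minimal sketch of the negw case, assuming RV64 (neg64/negw64 are hypothetical helpers modeling the two instructions, not LLVM code):

    #include <cassert>
    #include <cstdint>

    static uint64_t neg64(uint64_t rs1) { return 0 - rs1; }  // neg: full 64-bit negate
    static uint64_t negw64(uint64_t rs1) {                   // negw: 32-bit negate, sign-extended
      return (uint64_t)(int64_t)(int32_t)(uint32_t)(0 - rs1);
    }

    int main() {
      uint64_t amt = 0x80000003ULL; // neg and negw disagree above bit 31 here
      assert(neg64(amt) != negw64(amt));
      // ...but they agree in bits [5:0], which is all SLL/SRL/SRA/ROL/ROR read.
      assert((neg64(amt) & 63) == (negw64(amt) & 63));
      return 0;
    }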

diff --git a/llvm/test/CodeGen/RISCV/fpenv.ll b/llvm/test/CodeGen/RISCV/fpenv.ll
index 28fac83b97d9..4866c234a05f 100644
--- a/llvm/test/CodeGen/RISCV/fpenv.ll
+++ b/llvm/test/CodeGen/RISCV/fpenv.ll
@@ -16,7 +16,7 @@ define i32 @func_01() {
 ; RV64IF-LABEL: func_01:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    frrm a0
-; RV64IF-NEXT:    slli a0, a0, 2
+; RV64IF-NEXT:    slliw a0, a0, 2
 ; RV64IF-NEXT:    lui a1, 66
 ; RV64IF-NEXT:    addiw a1, a1, 769
 ; RV64IF-NEXT:    srl a0, a1, a0

diff --git a/llvm/test/CodeGen/RISCV/rotl-rotr.ll b/llvm/test/CodeGen/RISCV/rotl-rotr.ll
index 24c01605ae73..982885303fa5 100644
--- a/llvm/test/CodeGen/RISCV/rotl-rotr.ll
+++ b/llvm/test/CodeGen/RISCV/rotl-rotr.ll
@@ -543,7 +543,7 @@ define i64 @rotl_64_mask(i64 %x, i64 %y) nounwind {
 ;
 ; RV64I-LABEL: rotl_64_mask:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    neg a2, a1
+; RV64I-NEXT:    negw a2, a1
 ; RV64I-NEXT:    sll a1, a0, a1
 ; RV64I-NEXT:    srl a0, a0, a2
 ; RV64I-NEXT:    or a0, a1, a0
@@ -646,7 +646,7 @@ define i64 @rotl_64_mask_and_127_and_63(i64 %x, i64 %y) nounwind {
 ; RV64I-LABEL: rotl_64_mask_and_127_and_63:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    sll a2, a0, a1
-; RV64I-NEXT:    neg a1, a1
+; RV64I-NEXT:    negw a1, a1
 ; RV64I-NEXT:    srl a0, a0, a1
 ; RV64I-NEXT:    or a0, a2, a0
 ; RV64I-NEXT:    ret
@@ -788,7 +788,7 @@ define i64 @rotr_64_mask(i64 %x, i64 %y) nounwind {
 ;
 ; RV64I-LABEL: rotr_64_mask:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    neg a2, a1
+; RV64I-NEXT:    negw a2, a1
 ; RV64I-NEXT:    srl a1, a0, a1
 ; RV64I-NEXT:    sll a0, a0, a2
 ; RV64I-NEXT:    or a0, a1, a0
@@ -891,7 +891,7 @@ define i64 @rotr_64_mask_and_127_and_63(i64 %x, i64 %y) nounwind {
 ; RV64I-LABEL: rotr_64_mask_and_127_and_63:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    srl a2, a0, a1
-; RV64I-NEXT:    neg a1, a1
+; RV64I-NEXT:    negw a1, a1
 ; RV64I-NEXT:    sll a0, a0, a1
 ; RV64I-NEXT:    or a0, a2, a0
 ; RV64I-NEXT:    ret
@@ -1083,7 +1083,7 @@ define signext i64 @rotl_64_mask_shared(i64 signext %a, i64 signext %b, i64 sign
 ; RV64I-LABEL: rotl_64_mask_shared:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    sll a3, a0, a2
-; RV64I-NEXT:    neg a4, a2
+; RV64I-NEXT:    negw a4, a2
 ; RV64I-NEXT:    srl a0, a0, a4
 ; RV64I-NEXT:    or a0, a3, a0
 ; RV64I-NEXT:    sll a1, a1, a2
@@ -1239,7 +1239,7 @@ define signext i64 @rotr_64_mask_shared(i64 signext %a, i64 signext %b, i64 sign
 ; RV64I-LABEL: rotr_64_mask_shared:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    srl a3, a0, a2
-; RV64I-NEXT:    neg a4, a2
+; RV64I-NEXT:    negw a4, a2
 ; RV64I-NEXT:    sll a0, a0, a4
 ; RV64I-NEXT:    or a0, a3, a0
 ; RV64I-NEXT:    sll a1, a1, a2
@@ -1398,7 +1398,7 @@ define i64 @rotl_64_mask_multiple(i64 %a, i64 %b, i64 %amt) nounwind {
 ; RV64I-LABEL: rotl_64_mask_multiple:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    sll a3, a0, a2
-; RV64I-NEXT:    neg a4, a2
+; RV64I-NEXT:    negw a4, a2
 ; RV64I-NEXT:    srl a0, a0, a4
 ; RV64I-NEXT:    or a0, a3, a0
 ; RV64I-NEXT:    sll a2, a1, a2
@@ -1558,7 +1558,7 @@ define i64 @rotr_64_mask_multiple(i64 %a, i64 %b, i64 %amt) nounwind {
 ; RV64I-LABEL: rotr_64_mask_multiple:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    srl a3, a0, a2
-; RV64I-NEXT:    neg a4, a2
+; RV64I-NEXT:    negw a4, a2
 ; RV64I-NEXT:    sll a0, a0, a4
 ; RV64I-NEXT:    or a0, a3, a0
 ; RV64I-NEXT:    srl a2, a1, a2

diff --git a/llvm/test/CodeGen/RISCV/rv64zbb-zbkb.ll b/llvm/test/CodeGen/RISCV/rv64zbb-zbkb.ll
index 2f8213cc80ac..57a90b662c41 100644
--- a/llvm/test/CodeGen/RISCV/rv64zbb-zbkb.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbb-zbkb.ll
@@ -167,7 +167,7 @@ define i64 @rol_i64(i64 %a, i64 %b) nounwind {
 ; RV64I-LABEL: rol_i64:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    sll a2, a0, a1
-; RV64I-NEXT:    neg a1, a1
+; RV64I-NEXT:    negw a1, a1
 ; RV64I-NEXT:    srl a0, a0, a1
 ; RV64I-NEXT:    or a0, a2, a0
 ; RV64I-NEXT:    ret
@@ -245,7 +245,7 @@ define i64 @ror_i64(i64 %a, i64 %b) nounwind {
 ; RV64I-LABEL: ror_i64:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    srl a2, a0, a1
-; RV64I-NEXT:    neg a1, a1
+; RV64I-NEXT:    negw a1, a1
 ; RV64I-NEXT:    sll a0, a0, a1
 ; RV64I-NEXT:    or a0, a2, a0
 ; RV64I-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/shift-masked-shamt.ll b/llvm/test/CodeGen/RISCV/shift-masked-shamt.ll
index daba4acaf05d..50fbf4f5797e 100644
--- a/llvm/test/CodeGen/RISCV/shift-masked-shamt.ll
+++ b/llvm/test/CodeGen/RISCV/shift-masked-shamt.ll
@@ -182,7 +182,7 @@ define i64 @sll_redundant_mask_zeros_i64(i64 %a, i64 %b) nounwind {
 ;
 ; RV64I-LABEL: sll_redundant_mask_zeros_i64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a1, a1, 2
+; RV64I-NEXT:    slliw a1, a1, 2
 ; RV64I-NEXT:    sll a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = shl i64 %b, 2
@@ -216,7 +216,7 @@ define i64 @srl_redundant_mask_zeros_i64(i64 %a, i64 %b) nounwind {
 ;
 ; RV64I-LABEL: srl_redundant_mask_zeros_i64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a1, a1, 3
+; RV64I-NEXT:    slliw a1, a1, 3
 ; RV64I-NEXT:    srl a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = shl i64 %b, 3
@@ -247,7 +247,7 @@ define i64 @sra_redundant_mask_zeros_i64(i64 %a, i64 %b) nounwind {
 ;
 ; RV64I-LABEL: sra_redundant_mask_zeros_i64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a1, a1, 4
+; RV64I-NEXT:    slliw a1, a1, 4
 ; RV64I-NEXT:    sra a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = shl i64 %b, 4

diff --git a/llvm/test/CodeGen/RISCV/shifts.ll b/llvm/test/CodeGen/RISCV/shifts.ll
index 2ca173a53940..e1c36448fce1 100644
--- a/llvm/test/CodeGen/RISCV/shifts.ll
+++ b/llvm/test/CodeGen/RISCV/shifts.ll
@@ -571,7 +571,7 @@ define i64 @fshr64_minsize(i64 %a, i64 %b) minsize nounwind {
 ; RV64I-LABEL: fshr64_minsize:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    srl a2, a0, a1
-; RV64I-NEXT:    neg a1, a1
+; RV64I-NEXT:    negw a1, a1
 ; RV64I-NEXT:    sll a0, a0, a1
 ; RV64I-NEXT:    or a0, a2, a0
 ; RV64I-NEXT:    ret


        

