[llvm] bd5f124 - [RISCV] Add SimplifyDemandedBits support for FSR/FSL/FSRW/FSLW.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Sat Mar 5 21:45:03 PST 2022


Author: Craig Topper
Date: 2022-03-05T21:26:51-08:00
New Revision: bd5f1247166a93bb5253db0116c474bc72e42505

URL: https://github.com/llvm/llvm-project/commit/bd5f1247166a93bb5253db0116c474bc72e42505
DIFF: https://github.com/llvm/llvm-project/commit/bd5f1247166a93bb5253db0116c474bc72e42505.diff

LOG: [RISCV] Add SimplifyDemandedBits support for FSR/FSL/FSRW/FSLW.

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/test/CodeGen/RISCV/rv32zbt-intrinsic.ll
    llvm/test/CodeGen/RISCV/rv64zbt-intrinsic.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 4cdc06dd89bdd..60a716cbdc344 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -8207,6 +8207,21 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
 
     break;
   }
+  case RISCVISD::FSR:
+  case RISCVISD::FSL:
+  case RISCVISD::FSRW:
+  case RISCVISD::FSLW: {
+    bool IsWInstruction =
+        N->getOpcode() == RISCVISD::FSRW || N->getOpcode() == RISCVISD::FSLW;
+    unsigned BitWidth =
+        IsWInstruction ? 32 : N->getSimpleValueType(0).getSizeInBits();
+    assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
+    // Only the lower log2(BitWidth)+1 bits of the shift amount are read.
+    if (SimplifyDemandedLowBitsHelper(1, Log2_32(BitWidth) + 1))
+      return SDValue(N, 0);
+
+    break;
+  }
   case RISCVISD::FMV_X_ANYEXTH:
   case RISCVISD::FMV_X_ANYEXTW_RV64: {
     SDLoc DL(N);

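The SimplifyDemandedLowBitsHelper call above relies on the fact that FSL/FSR
read only the low Log2_32(BitWidth)+1 bits of their shift amount: the funnel
shift selects BitWidth bits out of the 2*BitWidth-bit concatenation of the two
data operands, so the amount is only meaningful in [0, 2*BitWidth). A
standalone sketch of that arithmetic (illustrative only, not part of the
patch; the function name below is made up):

  #include <cassert>

  // Number of shift-amount bits a BitWidth-wide funnel shift actually reads,
  // i.e. enough bits to encode any amount in [0, 2*BitWidth).
  static unsigned demandedShiftAmountBits(unsigned BitWidth) {
    unsigned Log2 = 0;
    while ((1u << Log2) < BitWidth)
      ++Log2;                       // Log2_32(BitWidth) for a power of two
    return Log2 + 1;
  }

  int main() {
    // FSL/FSR on RV32 and FSLW/FSRW on RV64 operate on 32-bit values.
    assert(demandedShiftAmountBits(32) == 6);   // demanded mask 0x3f
    // FSL/FSR on RV64 operate on 64-bit values.
    assert(demandedShiftAmountBits(64) == 7);   // demanded mask 0x7f
    return 0;
  }
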
diff --git a/llvm/test/CodeGen/RISCV/rv32zbt-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv32zbt-intrinsic.ll
index 968ac227ec770..6383acf8e2e9d 100644
--- a/llvm/test/CodeGen/RISCV/rv32zbt-intrinsic.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zbt-intrinsic.ll
@@ -13,6 +13,17 @@ define i32 @fsl_i32(i32 %a, i32 %b, i32 %c) nounwind {
   ret i32 %1
 }
 
+define i32 @fsl_i32_demandedbits(i32 %a, i32 %b, i32 %c) nounwind {
+; RV32ZBT-LABEL: fsl_i32_demandedbits:
+; RV32ZBT:       # %bb.0:
+; RV32ZBT-NEXT:    andi a1, a1, 31
+; RV32ZBT-NEXT:    fsl a0, a0, a1, a2
+; RV32ZBT-NEXT:    ret
+  %bmask = and i32 %b, 95
+  %1 = call i32 @llvm.riscv.fsl.i32(i32 %a, i32 %bmask, i32 %c)
+  ret i32 %1
+}
+
 declare i32 @llvm.riscv.fsr.i32(i32, i32, i32)
 
 define i32 @fsr_i32(i32 %a, i32 %b, i32 %c) nounwind {
@@ -24,6 +35,17 @@ define i32 @fsr_i32(i32 %a, i32 %b, i32 %c) nounwind {
   ret i32 %1
 }
 
+define i32 @fsr_i32_demandedbits(i32 %a, i32 %b, i32 %c) nounwind {
+; RV32ZBT-LABEL: fsr_i32_demandedbits:
+; RV32ZBT:       # %bb.0:
+; RV32ZBT-NEXT:    andi a1, a1, 31
+; RV32ZBT-NEXT:    fsr a0, a0, a1, a2
+; RV32ZBT-NEXT:    ret
+  %bmask = and i32 %b, 95
+  %1 = call i32 @llvm.riscv.fsr.i32(i32 %a, i32 %bmask, i32 %c)
+  ret i32 %1
+}
+
 define i32 @fsli_i32(i32 %a, i32 %b) nounwind {
 ; RV32ZBT-LABEL: fsli_i32:
 ; RV32ZBT:       # %bb.0:

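The new rv32 checks show the demanded-bits shrink in action: with only the low
6 bits of the shift amount live, the constant in "and i32 %b, 95" can be
shrunk to 95 & 0x3f = 31, so the masking folds to a single andi of 31 in front
of the fsl/fsr. A quick standalone check of that arithmetic (illustrative
only, not part of the patch):

  #include <cassert>

  int main() {
    const unsigned DemandedMask = 0x3f;       // low 6 bits for 32-bit FSL/FSR
    assert((95u & DemandedMask) == 31u);      // matches "andi a1, a1, 31"
    return 0;
  }
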
diff --git a/llvm/test/CodeGen/RISCV/rv64zbt-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv64zbt-intrinsic.ll
index a22212be90dc2..f7d2913ef839c 100644
--- a/llvm/test/CodeGen/RISCV/rv64zbt-intrinsic.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbt-intrinsic.ll
@@ -13,6 +13,17 @@ define i32 @fsl_i32(i32 %a, i32 %b, i32 %c) nounwind {
   ret i32 %1
 }
 
+define i32 @fsl_i32_demandedbits(i32 %a, i32 %b, i32 %c) nounwind {
+; RV64ZBT-LABEL: fsl_i32_demandedbits:
+; RV64ZBT:       # %bb.0:
+; RV64ZBT-NEXT:    andi a1, a1, 31
+; RV64ZBT-NEXT:    fslw a0, a0, a1, a2
+; RV64ZBT-NEXT:    ret
+  %bmask = and i32 %b, 95
+  %1 = call i32 @llvm.riscv.fsl.i32(i32 %a, i32 %bmask, i32 %c)
+  ret i32 %1
+}
+
 declare i32 @llvm.riscv.fsr.i32(i32, i32, i32)
 
 define i32 @fsr_i32(i32 %a, i32 %b, i32 %c) nounwind {
@@ -24,6 +35,17 @@ define i32 @fsr_i32(i32 %a, i32 %b, i32 %c) nounwind {
   ret i32 %1
 }
 
+define i32 @fsr_i32_demandedbits(i32 %a, i32 %b, i32 %c) nounwind {
+; RV64ZBT-LABEL: fsr_i32_demandedbits:
+; RV64ZBT:       # %bb.0:
+; RV64ZBT-NEXT:    andi a1, a1, 31
+; RV64ZBT-NEXT:    fsrw a0, a0, a1, a2
+; RV64ZBT-NEXT:    ret
+  %bmask = and i32 %b, 95
+  %1 = call i32 @llvm.riscv.fsr.i32(i32 %a, i32 %bmask, i32 %c)
+  ret i32 %1
+}
+
 define i32 @fsli_i32(i32 %a, i32 %b) nounwind {
 ; RV64ZBT-LABEL: fsli_i32:
 ; RV64ZBT:       # %bb.0:
@@ -53,6 +75,17 @@ define i64 @fsl_i64(i64 %a, i64 %b, i64 %c) nounwind {
   ret i64 %1
 }
 
+define i64 @fsl_i64_demandedbits(i64 %a, i64 %b, i64 %c) nounwind {
+; RV64ZBT-LABEL: fsl_i64_demandedbits:
+; RV64ZBT:       # %bb.0:
+; RV64ZBT-NEXT:    andi a1, a1, 63
+; RV64ZBT-NEXT:    fsl a0, a0, a1, a2
+; RV64ZBT-NEXT:    ret
+  %bmask = and i64 %b, 191
+  %1 = call i64 @llvm.riscv.fsl.i64(i64 %a, i64 %bmask, i64 %c)
+  ret i64 %1
+}
+
 declare i64 @llvm.riscv.fsr.i64(i64, i64, i64)
 
 define i64 @fsr_i64(i64 %a, i64 %b, i64 %c) nounwind {
@@ -64,6 +97,17 @@ define i64 @fsr_i64(i64 %a, i64 %b, i64 %c) nounwind {
   ret i64 %1
 }
 
+define i64 @fsr_i64_demandedbits(i64 %a, i64 %b, i64 %c) nounwind {
+; RV64ZBT-LABEL: fsr_i64_demandedbits:
+; RV64ZBT:       # %bb.0:
+; RV64ZBT-NEXT:    andi a1, a1, 63
+; RV64ZBT-NEXT:    fsr a0, a0, a1, a2
+; RV64ZBT-NEXT:    ret
+  %bmask = and i64 %b, 191
+  %1 = call i64 @llvm.riscv.fsr.i64(i64 %a, i64 %bmask, i64 %c)
+  ret i64 %1
+}
+
 define i64 @fsli_i64(i64 %a, i64 %b) nounwind {
 ; RV64ZBT-LABEL: fsli_i64:
 ; RV64ZBT:       # %bb.0:

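On RV64 the picture is the same at two widths. The i32 intrinsics select the
FSLW/FSRW forms, for which the combine fixes BitWidth to 32 rather than the
64-bit value type, so they still demand only 6 bits and the i32 tests keep the
andi 31. The i64 forms demand Log2_32(64)+1 = 7 bits, so the constant in
"and i64 %b, 191" shrinks to 191 & 0x7f = 63, matching the new andi 63 checks.
A quick standalone check (illustrative only, not part of the patch):

  #include <cassert>

  int main() {
    assert((191u & 0x7f) == 63u);   // i64 via FSL/FSR: "andi a1, a1, 63"
    return 0;
  }
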
