[llvm] c4806db - [RISCV] Fold LI 1 / SLLI into BSETI during i64 materialization (#142348)

via llvm-commits llvm-commits at lists.llvm.org
Mon Jun 2 11:43:20 PDT 2025


Author: Piotr Fusik
Date: 2025-06-02T20:43:16+02:00
New Revision: c4806dbda348556d58fa10fa06b1d9dd95bac4c8

URL: https://github.com/llvm/llvm-project/commit/c4806dbda348556d58fa10fa06b1d9dd95bac4c8
DIFF: https://github.com/llvm/llvm-project/commit/c4806dbda348556d58fa10fa06b1d9dd95bac4c8.diff

LOG: [RISCV] Fold LI 1 / SLLI into BSETI during i64 materialization (#142348)

My first approach was to avoid emitting LI 1 / SLLI in the first place.
Unfortunately, that favors BSETI C / ADDI -1 over LI -1 / SRLI 64-C,
even though both instructions in the latter are compressible (c.li /
c.srli, 4 bytes total, versus 6 bytes for the non-compressible BSETI
plus c.addi). This is because the code assumes in several places that
a two-instruction sequence (here: BSETI / ADDI) cannot be improved.

Another possible approach would be to keep LI 1 / SLLI when it is
going to be replaced with SRLI later. That would be harder to follow
than patching LI 1 / SLLI into BSETI after the fact, as done here.
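
A standalone C++ check (not part of the patch) that the two competing
sequences from the first paragraph produce the same 2^C - 1 constant,
differing only in code size:

#include <cassert>
#include <cstdint>

int main() {
  for (unsigned C = 1; C < 64; ++C) {
    // BSETI C / ADDI -1: 4-byte BSETI (no compressed form) + 2-byte c.addi.
    uint64_t BsetiAddi = (uint64_t(1) << C) - 1;
    // LI -1 / SRLI 64-C: 2-byte c.li + 2-byte c.srli.
    uint64_t LiSrli = UINT64_MAX >> (64 - C);
    assert(BsetiAddi == LiSrli); // Same constant, 6 vs. 4 bytes of code.
  }
}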

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp
    llvm/test/CodeGen/RISCV/imm.ll
    llvm/test/CodeGen/RISCV/zbb-logic-neg-imm.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp
index 8ea2548258fdb..323bcfc11625a 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp
@@ -353,6 +353,13 @@ InstSeq generateInstSeq(int64_t Val, const MCSubtargetInfo &STI) {
       } while (Hi != 0);
       Res = TmpSeq;
     }
+
+    // Fold LI 1 + SLLI into BSETI.
+    if (Res[0].getOpcode() == RISCV::ADDI && Res[0].getImm() == 1 &&
+        Res[1].getOpcode() == RISCV::SLLI) {
+      Res.erase(Res.begin());                                 // Remove ADDI.
+      Res.front() = Inst(RISCV::BSETI, Res.front().getImm()); // Patch SLLI.
+    }
   }
 
   // Perform optimization with BCLRI in the Zbs extension.
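
For reference, the new fold reads as the following standalone sketch.
Inst, InstSeq and foldLiSlliIntoBseti are simplified stand-ins for
illustration, not the real RISCVMatInt types; the real code path
appears to guarantee at least two instructions in the sequence, so it
skips the size check:

#include <cassert>
#include <cstdint>
#include <vector>

// Simplified stand-ins for the real RISCVMatInt types.
enum Opcode { ADDI, SLLI, BSETI };
struct Inst {
  Opcode Opc;
  int64_t Imm;
};
using InstSeq = std::vector<Inst>;

// If the sequence starts with LI 1 (an ADDI from x0) followed by SLLI,
// rewrite the pair as a single BSETI on x0.
static void foldLiSlliIntoBseti(InstSeq &Res) {
  if (Res.size() >= 2 && Res[0].Opc == ADDI && Res[0].Imm == 1 &&
      Res[1].Opc == SLLI) {
    Res.erase(Res.begin());                  // Remove the ADDI (LI 1).
    Res.front() = {BSETI, Res.front().Imm};  // Patch the SLLI in place.
  }
}

int main() {
  // 0xFFFFFFFF0 = (1 << 36) - 16, matching the imm.ll test below.
  InstSeq Seq = {{ADDI, 1}, {SLLI, 36}, {ADDI, -16}};
  foldLiSlliIntoBseti(Seq);
  assert(Seq.size() == 2 && Seq[0].Opc == BSETI && Seq[0].Imm == 36);
}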

diff --git a/llvm/test/CodeGen/RISCV/imm.ll b/llvm/test/CodeGen/RISCV/imm.ll
index fc3af22f082d7..418407d9b7cd6 100644
--- a/llvm/test/CodeGen/RISCV/imm.ll
+++ b/llvm/test/CodeGen/RISCV/imm.ll
@@ -4674,8 +4674,7 @@ define i64 @imm64_0xFFFFFFFF0() {
 ;
 ; RV64IZBS-LABEL: imm64_0xFFFFFFFF0:
 ; RV64IZBS:       # %bb.0:
-; RV64IZBS-NEXT:    li a0, 1
-; RV64IZBS-NEXT:    slli a0, a0, 36
+; RV64IZBS-NEXT:    bseti a0, zero, 36
 ; RV64IZBS-NEXT:    addi a0, a0, -16
 ; RV64IZBS-NEXT:    ret
 ;
@@ -4737,8 +4736,7 @@ define i64 @imm64_0x1FFFFFF08() {
 ;
 ; RV64IZBS-LABEL: imm64_0x1FFFFFF08:
 ; RV64IZBS:       # %bb.0:
-; RV64IZBS-NEXT:    li a0, 1
-; RV64IZBS-NEXT:    slli a0, a0, 33
+; RV64IZBS-NEXT:    bseti a0, zero, 33
 ; RV64IZBS-NEXT:    addi a0, a0, -248
 ; RV64IZBS-NEXT:    ret
 ;

diff --git a/llvm/test/CodeGen/RISCV/zbb-logic-neg-imm.ll b/llvm/test/CodeGen/RISCV/zbb-logic-neg-imm.ll
index 0a7dd57d03969..eb81e0f13d14a 100644
--- a/llvm/test/CodeGen/RISCV/zbb-logic-neg-imm.ll
+++ b/llvm/test/CodeGen/RISCV/zbb-logic-neg-imm.ll
@@ -397,18 +397,27 @@ define i64 @and_or_or(i64 %x, i64 %y) {
 ; RV32-NEXT:    and a1, a1, a3
 ; RV32-NEXT:    ret
 ;
-; RV64-LABEL: and_or_or:
-; RV64:       # %bb.0:
-; RV64-NEXT:    li a2, -1
-; RV64-NEXT:    slli a2, a2, 33
-; RV64-NEXT:    addi a2, a2, 1
-; RV64-NEXT:    or a0, a0, a2
-; RV64-NEXT:    li a2, 1
-; RV64-NEXT:    slli a2, a2, 33
-; RV64-NEXT:    addi a2, a2, -2
-; RV64-NEXT:    or a1, a1, a2
-; RV64-NEXT:    and a0, a0, a1
-; RV64-NEXT:    ret
+; NOZBS64-LABEL: and_or_or:
+; NOZBS64:       # %bb.0:
+; NOZBS64-NEXT:    li a2, -1
+; NOZBS64-NEXT:    slli a2, a2, 33
+; NOZBS64-NEXT:    addi a2, a2, 1
+; NOZBS64-NEXT:    or a0, a0, a2
+; NOZBS64-NEXT:    li a2, 1
+; NOZBS64-NEXT:    slli a2, a2, 33
+; NOZBS64-NEXT:    addi a2, a2, -2
+; NOZBS64-NEXT:    or a1, a1, a2
+; NOZBS64-NEXT:    and a0, a0, a1
+; NOZBS64-NEXT:    ret
+;
+; ZBS64-LABEL: and_or_or:
+; ZBS64:       # %bb.0:
+; ZBS64-NEXT:    bseti a2, zero, 33
+; ZBS64-NEXT:    addi a2, a2, -2
+; ZBS64-NEXT:    orn a0, a0, a2
+; ZBS64-NEXT:    or a1, a1, a2
+; ZBS64-NEXT:    and a0, a0, a1
+; ZBS64-NEXT:    ret
   %a = or i64 %x, -8589934591
   %b = or i64 %y, 8589934590
   %c = and i64 %a, %b
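
A side note on the ZBS64 output above: a single materialized constant
now serves both ORs, because ORN complements it on the fly. A
standalone check of that arithmetic (not part of the patch):

#include <cassert>
#include <cstdint>

int main() {
  // The one constant the ZBS64 path materializes: BSETI 33 / ADDI -2.
  int64_t A2 = (int64_t(1) << 33) - 2;  // 8589934590, used by the plain OR
  // ORN computes a0 | ~a2, so the same register also covers the other
  // immediate: ~(2^33 - 2) == -(2^33 - 1).
  assert(A2 == 8589934590 && ~A2 == -8589934591);
}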

More information about the llvm-commits mailing list