[llvm] 5c38373 - [RISCV] Improve constant materialization for cases that can use LUI+ADDI instead of LUI+ADDIW.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Fri Apr 29 09:01:57 PDT 2022


Author: Craig Topper
Date: 2022-04-29T08:58:32-07:00
New Revision: 5c3837312503b4ef8443951194127c4ba2a03153

URL: https://github.com/llvm/llvm-project/commit/5c3837312503b4ef8443951194127c4ba2a03153
DIFF: https://github.com/llvm/llvm-project/commit/5c3837312503b4ef8443951194127c4ba2a03153.diff

LOG: [RISCV] Improve constant materialization for cases that can use LUI+ADDI instead of LUI+ADDIW.

It's possible that we have a constant that isn't simm32 so we can't
use LUI+ADDIW, but we can use LUI+ADDI. Because ADDI uses a sign
extended constant, it's possible that after subtracting it out, we
end up with a simm32 that maps to LUI.

This patch detects this case after removing Lo12 and before shifting
the value for SLLI.

Reviewed By: luismarques

Differential Revision: https://reviews.llvm.org/D124222

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp
    llvm/test/CodeGen/RISCV/imm.ll
    llvm/test/CodeGen/RISCV/rv64zbs.ll
    llvm/test/MC/RISCV/rv64i-aliases-valid.s
    llvm/test/MC/RISCV/rv64zbs-aliases-valid.s

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp
index e70acf07cf87..ea74dde6f715 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp
@@ -103,43 +103,53 @@ static void generateInstSeqImpl(int64_t Val,
   // performed when the recursion returns.
 
   int64_t Lo12 = SignExtend64<12>(Val);
-  int64_t Hi52 = ((uint64_t)Val + 0x800ull) >> 12;
-  int ShiftAmount = 12 + findFirstSet((uint64_t)Hi52);
-  Hi52 = SignExtend64(Hi52 >> (ShiftAmount - 12), 64 - ShiftAmount);
+  Val = (uint64_t)Val - (uint64_t)Lo12;
 
-  // If the remaining bits don't fit in 12 bits, we might be able to reduce the
-  // shift amount in order to use LUI which will zero the lower 12 bits.
+  int ShiftAmount = 0;
   bool Unsigned = false;
-  if (ShiftAmount > 12 && !isInt<12>(Hi52)) {
-    if (isInt<32>((uint64_t)Hi52 << 12)) {
-      // Reduce the shift amount and add zeros to the LSBs so it will match LUI.
-      ShiftAmount -= 12;
-      Hi52 = (uint64_t)Hi52 << 12;
-    } else if (isUInt<32>((uint64_t)Hi52 << 12) &&
-               ActiveFeatures[RISCV::FeatureStdExtZba]) {
-      // Reduce the shift amount and add zeros to the LSBs so it will match
-      // LUI, then shift left with SLLI.UW to clear the upper 32 set bits.
-      ShiftAmount -= 12;
-      Hi52 = ((uint64_t)Hi52 << 12) | (0xffffffffull << 32);
+
+  // Val might now be valid for LUI without needing a shift.
+  if (!isInt<32>(Val)) {
+    ShiftAmount = findFirstSet((uint64_t)Val);
+    Val >>= ShiftAmount;
+
+    // If the remaining bits don't fit in 12 bits, we might be able to reduce the
+    // shift amount in order to use LUI which will zero the lower 12 bits.
+    if (ShiftAmount > 12 && !isInt<12>(Val)) {
+      if (isInt<32>((uint64_t)Val << 12)) {
+        // Reduce the shift amount and add zeros to the LSBs so it will match LUI.
+        ShiftAmount -= 12;
+        Val = (uint64_t)Val << 12;
+      } else if (isUInt<32>((uint64_t)Val << 12) &&
+                 ActiveFeatures[RISCV::FeatureStdExtZba]) {
+        // Reduce the shift amount and add zeros to the LSBs so it will match
+        // LUI, then shift left with SLLI.UW to clear the upper 32 set bits.
+        ShiftAmount -= 12;
+        Val = ((uint64_t)Val << 12) | (0xffffffffull << 32);
+        Unsigned = true;
+      }
+    }
+
+    // Try to use SLLI_UW for Val when it is uint32 but not int32.
+    if (isUInt<32>((uint64_t)Val) && !isInt<32>((uint64_t)Val) &&
+        ActiveFeatures[RISCV::FeatureStdExtZba]) {
+      // Use LUI+ADDI or LUI to compose, then clear the upper 32 bits with
+      // SLLI_UW.
+      Val = ((uint64_t)Val) | (0xffffffffull << 32);
       Unsigned = true;
     }
   }
 
-  // Try to use SLLI_UW for Hi52 when it is uint32 but not int32.
-  if (isUInt<32>((uint64_t)Hi52) && !isInt<32>((uint64_t)Hi52) &&
-      ActiveFeatures[RISCV::FeatureStdExtZba]) {
-    // Use LUI+ADDI or LUI to compose, then clear the upper 32 bits with
-    // SLLI_UW.
-    Hi52 = ((uint64_t)Hi52) | (0xffffffffull << 32);
-    Unsigned = true;
-  }
+  generateInstSeqImpl(Val, ActiveFeatures, Res);
 
-  generateInstSeqImpl(Hi52, ActiveFeatures, Res);
+  // Skip shift if we were able to use LUI directly.
+  if (ShiftAmount) {
+    if (Unsigned)
+      Res.push_back(RISCVMatInt::Inst(RISCV::SLLI_UW, ShiftAmount));
+    else
+      Res.push_back(RISCVMatInt::Inst(RISCV::SLLI, ShiftAmount));
+  }
 
-  if (Unsigned)
-    Res.push_back(RISCVMatInt::Inst(RISCV::SLLI_UW, ShiftAmount));
-  else
-    Res.push_back(RISCVMatInt::Inst(RISCV::SLLI, ShiftAmount));
   if (Lo12)
     Res.push_back(RISCVMatInt::Inst(RISCV::ADDI, Lo12));
 }

diff --git a/llvm/test/CodeGen/RISCV/imm.ll b/llvm/test/CodeGen/RISCV/imm.ll
index a5571c5a43a7..d54894890528 100644
--- a/llvm/test/CodeGen/RISCV/imm.ll
+++ b/llvm/test/CodeGen/RISCV/imm.ll
@@ -1831,29 +1831,26 @@ define i64 @imm_neg_2147485013() {
 ;
 ; RV64I-LABEL: imm_neg_2147485013:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    li a0, -1
-; RV64I-NEXT:    slli a0, a0, 31
+; RV64I-NEXT:    lui a0, 524288
 ; RV64I-NEXT:    addi a0, a0, -1365
 ; RV64I-NEXT:    ret
 ;
 ; RV64IZBA-LABEL: imm_neg_2147485013:
 ; RV64IZBA:       # %bb.0:
-; RV64IZBA-NEXT:    li a0, -1
-; RV64IZBA-NEXT:    slli a0, a0, 31
+; RV64IZBA-NEXT:    lui a0, 524288
 ; RV64IZBA-NEXT:    addi a0, a0, -1365
 ; RV64IZBA-NEXT:    ret
 ;
 ; RV64IZBB-LABEL: imm_neg_2147485013:
 ; RV64IZBB:       # %bb.0:
-; RV64IZBB-NEXT:    li a0, -1
-; RV64IZBB-NEXT:    slli a0, a0, 31
+; RV64IZBB-NEXT:    lui a0, 524288
 ; RV64IZBB-NEXT:    addi a0, a0, -1365
 ; RV64IZBB-NEXT:    ret
 ;
 ; RV64IZBS-LABEL: imm_neg_2147485013:
 ; RV64IZBS:       # %bb.0:
-; RV64IZBS-NEXT:    li a0, -1365
-; RV64IZBS-NEXT:    bclri a0, a0, 31
+; RV64IZBS-NEXT:    lui a0, 524288
+; RV64IZBS-NEXT:    addi a0, a0, -1365
 ; RV64IZBS-NEXT:    ret
   ret i64 -2147485013
 }

diff --git a/llvm/test/CodeGen/RISCV/rv64zbs.ll b/llvm/test/CodeGen/RISCV/rv64zbs.ll
index 1bc5dba763b8..bcefe2730bbb 100644
--- a/llvm/test/CodeGen/RISCV/rv64zbs.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbs.ll
@@ -540,8 +540,7 @@ define i64 @bclri_i64_30(i64 %a) nounwind {
 define i64 @bclri_i64_31(i64 %a) nounwind {
 ; RV64I-LABEL: bclri_i64_31:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    li a1, -1
-; RV64I-NEXT:    slli a1, a1, 31
+; RV64I-NEXT:    lui a1, 524288
 ; RV64I-NEXT:    addi a1, a1, -1
 ; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    ret

diff --git a/llvm/test/MC/RISCV/rv64i-aliases-valid.s b/llvm/test/MC/RISCV/rv64i-aliases-valid.s
index b9c1326d4d67..ac72c812f67e 100644
--- a/llvm/test/MC/RISCV/rv64i-aliases-valid.s
+++ b/llvm/test/MC/RISCV/rv64i-aliases-valid.s
@@ -186,6 +186,12 @@ li x12, 0xaaaaaaaa
 # CHECK-ALIAS-NEXT: slli a3, a3, 1
 li x13, 0xffffffff55555556
 
+# CHECK-S-OBJ-NOALIAS: lui t0, 524288
+# CHECK-S-OBJ-NOALIAS-NEXT: addi t0, t0, -1365
+# CHECK-S-OBJ: lui t0, 524288
+# CHECK-S-OBJ-NEXT: addi t0, t0, -1365
+li x5, -2147485013
+
 # CHECK-INST: addi a0, zero, 1110
 # CHECK-ALIAS: li a0, 1110
 li a0, %lo(0x123456)

diff --git a/llvm/test/MC/RISCV/rv64zbs-aliases-valid.s b/llvm/test/MC/RISCV/rv64zbs-aliases-valid.s
index 4a642dfc210d..9d67d2b798ce 100644
--- a/llvm/test/MC/RISCV/rv64zbs-aliases-valid.s
+++ b/llvm/test/MC/RISCV/rv64zbs-aliases-valid.s
@@ -37,12 +37,6 @@ bext x5, x6, 8
 # CHECK-S-OBJ-NEXT: bseti t0, t0, 31
 li x5, 2147485013
 
-# CHECK-S-OBJ-NOALIAS: addi t0, zero, -1365
-# CHECK-S-OBJ-NOALIAS-NEXT: bclri t0, t0, 31
-# CHECK-S-OBJ: li t0, -1365
-# CHECK-S-OBJ-NEXT: bclri t0, t0, 31
-li x5, -2147485013
-
 # CHECK-S-OBJ-NOALIAS: lui t1, 572348
 # CHECK-S-OBJ-NOALIAS-NEXT: addiw t1, t1, -1093
 # CHECK-S-OBJ-NOALIAS-NEXT: bclri t1, t1, 44


        


More information about the llvm-commits mailing list