[llvm] 1806ce9 - [RISCV] Teach RISCVMatInt to prefer li+slli over lui+addi(w) for compressibility.
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Tue Dec 6 10:39:10 PST 2022
Author: Craig Topper
Date: 2022-12-06T10:31:17-08:00
New Revision: 1806ce9097a635aa7a5530b5bf52547c78c87479
URL: https://github.com/llvm/llvm-project/commit/1806ce9097a635aa7a5530b5bf52547c78c87479
DIFF: https://github.com/llvm/llvm-project/commit/1806ce9097a635aa7a5530b5bf52547c78c87479.diff
LOG: [RISCV] Teach RISCVMatInt to prefer li+slli over lui+addi(w) for compressibility.
With C extension, li with a 6 bit immediate followed by slli is 4 bytes.
The lui+addi(w) sequence is at least 6 bytes.
The two sequences probably have similar execution latency. The exception
being if the target supports lui+addi(w) macrofusion.
Since the execution latency is probably the same I didn't restrict
this to C extension.
Reviewed By: reames
Differential Revision: https://reviews.llvm.org/D139135
Added:
Modified:
llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp
llvm/test/CodeGen/RISCV/calling-conv-half.ll
llvm/test/CodeGen/RISCV/i32-icmp.ll
llvm/test/CodeGen/RISCV/i64-icmp.ll
llvm/test/CodeGen/RISCV/macro-fusion-lui-addi.ll
llvm/test/CodeGen/RISCV/mul.ll
llvm/test/CodeGen/RISCV/pr58511.ll
llvm/test/CodeGen/RISCV/rv32zbs.ll
llvm/test/CodeGen/RISCV/rv64zbs.ll
llvm/test/CodeGen/RISCV/rvv/active_lane_mask.ll
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
llvm/test/CodeGen/RISCV/rvv/large-rvv-stack-size.mir
llvm/test/CodeGen/RISCV/rvv/vscale-power-of-two.ll
llvm/test/MC/RISCV/rv32c-aliases-valid.s
llvm/test/MC/RISCV/rv32i-aliases-valid.s
llvm/test/MC/RISCV/rv64c-aliases-valid.s
llvm/test/MC/RISCV/rv64i-aliases-valid.s
Removed:
################################################################################
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp
index f4e8a98b59375..71855b546be42 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp
@@ -178,15 +178,21 @@ InstSeq generateInstSeq(int64_t Val, const FeatureBitset &ActiveFeatures) {
// If the low 12 bits are non-zero, the first expansion may end with an ADDI
// or ADDIW. If there are trailing zeros, try generating a sign extended
// constant with no trailing zeros and use a final SLLI to restore them.
- if ((Val & 0xfff) != 0 && (Val & 1) == 0 && Res.size() > 2) {
+ if ((Val & 0xfff) != 0 && (Val & 1) == 0 && Res.size() >= 2) {
unsigned TrailingZeros = countTrailingZeros((uint64_t)Val);
int64_t ShiftedVal = Val >> TrailingZeros;
+ // If we can use C.LI+C.SLLI instead of LUI+ADDI(W) prefer that since
+ // its more compressible. But only if LUI+ADDI(W) isn't fusable.
+ // NOTE: We don't check for C extension to minimize differences in generated
+ // code.
+ bool IsShiftedCompressible =
+ isInt<6>(ShiftedVal) && !ActiveFeatures[RISCV::TuneLUIADDIFusion];
RISCVMatInt::InstSeq TmpSeq;
generateInstSeqImpl(ShiftedVal, ActiveFeatures, TmpSeq);
TmpSeq.emplace_back(RISCV::SLLI, TrailingZeros);
// Keep the new sequence if it is an improvement.
- if (TmpSeq.size() < Res.size())
+ if (TmpSeq.size() < Res.size() || IsShiftedCompressible)
Res = TmpSeq;
}
diff --git a/llvm/test/CodeGen/RISCV/calling-conv-half.ll b/llvm/test/CodeGen/RISCV/calling-conv-half.ll
index d14877a8aa959..18f7d3b5a817c 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-half.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-half.ll
@@ -529,14 +529,14 @@ define i32 @caller_half_on_stack() nounwind {
define half @callee_half_ret() nounwind {
; RV32I-LABEL: callee_half_ret:
; RV32I: # %bb.0:
-; RV32I-NEXT: lui a0, 4
-; RV32I-NEXT: addi a0, a0, -1024
+; RV32I-NEXT: li a0, 15
+; RV32I-NEXT: slli a0, a0, 10
; RV32I-NEXT: ret
;
; RV64I-LABEL: callee_half_ret:
; RV64I: # %bb.0:
-; RV64I-NEXT: lui a0, 4
-; RV64I-NEXT: addiw a0, a0, -1024
+; RV64I-NEXT: li a0, 15
+; RV64I-NEXT: slli a0, a0, 10
; RV64I-NEXT: ret
;
; RV32IF-LABEL: callee_half_ret:
diff --git a/llvm/test/CodeGen/RISCV/i32-icmp.ll b/llvm/test/CodeGen/RISCV/i32-icmp.ll
index dc9c545ae1c0a..3cbebf309e35f 100644
--- a/llvm/test/CodeGen/RISCV/i32-icmp.ll
+++ b/llvm/test/CodeGen/RISCV/i32-icmp.ll
@@ -598,8 +598,8 @@ define i32 @icmp_slt_constant_2047(i32 %a) nounwind {
define i32 @icmp_slt_constant_2048(i32 %a) nounwind {
; RV32I-LABEL: icmp_slt_constant_2048:
; RV32I: # %bb.0:
-; RV32I-NEXT: lui a1, 1
-; RV32I-NEXT: addi a1, a1, -2048
+; RV32I-NEXT: li a1, 1
+; RV32I-NEXT: slli a1, a1, 11
; RV32I-NEXT: slt a0, a0, a1
; RV32I-NEXT: ret
%1 = icmp slt i32 %a, 2048
@@ -663,8 +663,8 @@ define i32 @icmp_sle_constant_2046(i32 %a) nounwind {
define i32 @icmp_sle_constant_2047(i32 %a) nounwind {
; RV32I-LABEL: icmp_sle_constant_2047:
; RV32I: # %bb.0:
-; RV32I-NEXT: lui a1, 1
-; RV32I-NEXT: addi a1, a1, -2048
+; RV32I-NEXT: li a1, 1
+; RV32I-NEXT: slli a1, a1, 11
; RV32I-NEXT: slt a0, a0, a1
; RV32I-NEXT: ret
%1 = icmp sle i32 %a, 2047
diff --git a/llvm/test/CodeGen/RISCV/i64-icmp.ll b/llvm/test/CodeGen/RISCV/i64-icmp.ll
index 4527379946015..e7efe76968cc0 100644
--- a/llvm/test/CodeGen/RISCV/i64-icmp.ll
+++ b/llvm/test/CodeGen/RISCV/i64-icmp.ll
@@ -598,8 +598,8 @@ define i64 @icmp_slt_constant_2047(i64 %a) nounwind {
define i64 @icmp_slt_constant_2048(i64 %a) nounwind {
; RV64I-LABEL: icmp_slt_constant_2048:
; RV64I: # %bb.0:
-; RV64I-NEXT: lui a1, 1
-; RV64I-NEXT: addiw a1, a1, -2048
+; RV64I-NEXT: li a1, 1
+; RV64I-NEXT: slli a1, a1, 11
; RV64I-NEXT: slt a0, a0, a1
; RV64I-NEXT: ret
%1 = icmp slt i64 %a, 2048
@@ -663,8 +663,8 @@ define i64 @icmp_sle_constant_2046(i64 %a) nounwind {
define i64 @icmp_sle_constant_2047(i64 %a) nounwind {
; RV64I-LABEL: icmp_sle_constant_2047:
; RV64I: # %bb.0:
-; RV64I-NEXT: lui a1, 1
-; RV64I-NEXT: addiw a1, a1, -2048
+; RV64I-NEXT: li a1, 1
+; RV64I-NEXT: slli a1, a1, 11
; RV64I-NEXT: slt a0, a0, a1
; RV64I-NEXT: ret
%1 = icmp sle i64 %a, 2047
diff --git a/llvm/test/CodeGen/RISCV/macro-fusion-lui-addi.ll b/llvm/test/CodeGen/RISCV/macro-fusion-lui-addi.ll
index b9719f0f66c08..44e4bb31d048f 100644
--- a/llvm/test/CodeGen/RISCV/macro-fusion-lui-addi.ll
+++ b/llvm/test/CodeGen/RISCV/macro-fusion-lui-addi.ll
@@ -26,3 +26,19 @@ define void @foo(i32 signext %0, i32 signext %1) {
}
declare void @bar(i8*, float)
+
+; Test that we prefer lui+addiw over li+slli.
+define i32 @test_matint() {
+; NOFUSION-LABEL: test_matint:
+; NOFUSION: # %bb.0:
+; NOFUSION-NEXT: li a0, 1
+; NOFUSION-NEXT: slli a0, a0, 11
+; NOFUSION-NEXT: ret
+;
+; FUSION-LABEL: test_matint:
+; FUSION: # %bb.0:
+; FUSION-NEXT: lui a0, 1
+; FUSION-NEXT: addiw a0, a0, -2048
+; FUSION-NEXT: ret
+ ret i32 2048
+}
diff --git a/llvm/test/CodeGen/RISCV/mul.ll b/llvm/test/CodeGen/RISCV/mul.ll
index ee060201afa31..a09db37ad2400 100644
--- a/llvm/test/CodeGen/RISCV/mul.ll
+++ b/llvm/test/CodeGen/RISCV/mul.ll
@@ -819,8 +819,8 @@ define i32 @muli32_p4352(i32 %a) nounwind {
;
; RV32IM-LABEL: muli32_p4352:
; RV32IM: # %bb.0:
-; RV32IM-NEXT: lui a1, 1
-; RV32IM-NEXT: addi a1, a1, 256
+; RV32IM-NEXT: li a1, 17
+; RV32IM-NEXT: slli a1, a1, 8
; RV32IM-NEXT: mul a0, a0, a1
; RV32IM-NEXT: ret
;
@@ -851,8 +851,8 @@ define i32 @muli32_p3840(i32 %a) nounwind {
;
; RV32IM-LABEL: muli32_p3840:
; RV32IM: # %bb.0:
-; RV32IM-NEXT: lui a1, 1
-; RV32IM-NEXT: addi a1, a1, -256
+; RV32IM-NEXT: li a1, 15
+; RV32IM-NEXT: slli a1, a1, 8
; RV32IM-NEXT: mul a0, a0, a1
; RV32IM-NEXT: ret
;
@@ -883,8 +883,8 @@ define i32 @muli32_m3840(i32 %a) nounwind {
;
; RV32IM-LABEL: muli32_m3840:
; RV32IM: # %bb.0:
-; RV32IM-NEXT: lui a1, 1048575
-; RV32IM-NEXT: addi a1, a1, 256
+; RV32IM-NEXT: li a1, -15
+; RV32IM-NEXT: slli a1, a1, 8
; RV32IM-NEXT: mul a0, a0, a1
; RV32IM-NEXT: ret
;
@@ -908,14 +908,14 @@ define i32 @muli32_m3840(i32 %a) nounwind {
define i32 @muli32_m4352(i32 %a) nounwind {
; RV32I-LABEL: muli32_m4352:
; RV32I: # %bb.0:
-; RV32I-NEXT: lui a1, 1048575
-; RV32I-NEXT: addi a1, a1, -256
+; RV32I-NEXT: li a1, -17
+; RV32I-NEXT: slli a1, a1, 8
; RV32I-NEXT: tail __mulsi3@plt
;
; RV32IM-LABEL: muli32_m4352:
; RV32IM: # %bb.0:
-; RV32IM-NEXT: lui a1, 1048575
-; RV32IM-NEXT: addi a1, a1, -256
+; RV32IM-NEXT: li a1, -17
+; RV32IM-NEXT: slli a1, a1, 8
; RV32IM-NEXT: mul a0, a0, a1
; RV32IM-NEXT: ret
;
@@ -923,8 +923,8 @@ define i32 @muli32_m4352(i32 %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lui a1, 1048575
-; RV64I-NEXT: addiw a1, a1, -256
+; RV64I-NEXT: li a1, -17
+; RV64I-NEXT: slli a1, a1, 8
; RV64I-NEXT: call __muldi3@plt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -932,8 +932,8 @@ define i32 @muli32_m4352(i32 %a) nounwind {
;
; RV64IM-LABEL: muli32_m4352:
; RV64IM: # %bb.0:
-; RV64IM-NEXT: lui a1, 1048575
-; RV64IM-NEXT: addiw a1, a1, -256
+; RV64IM-NEXT: li a1, -17
+; RV64IM-NEXT: slli a1, a1, 8
; RV64IM-NEXT: mulw a0, a0, a1
; RV64IM-NEXT: ret
%1 = mul i32 %a, -4352
@@ -959,8 +959,8 @@ define i64 @muli64_p4352(i64 %a) nounwind {
;
; RV32IM-LABEL: muli64_p4352:
; RV32IM: # %bb.0:
-; RV32IM-NEXT: lui a2, 1
-; RV32IM-NEXT: addi a2, a2, 256
+; RV32IM-NEXT: li a2, 17
+; RV32IM-NEXT: slli a2, a2, 8
; RV32IM-NEXT: mul a1, a1, a2
; RV32IM-NEXT: mulhu a3, a0, a2
; RV32IM-NEXT: add a1, a3, a1
@@ -976,8 +976,8 @@ define i64 @muli64_p4352(i64 %a) nounwind {
;
; RV64IM-LABEL: muli64_p4352:
; RV64IM: # %bb.0:
-; RV64IM-NEXT: lui a1, 1
-; RV64IM-NEXT: addiw a1, a1, 256
+; RV64IM-NEXT: li a1, 17
+; RV64IM-NEXT: slli a1, a1, 8
; RV64IM-NEXT: mul a0, a0, a1
; RV64IM-NEXT: ret
%1 = mul i64 %a, 4352
@@ -1003,8 +1003,8 @@ define i64 @muli64_p3840(i64 %a) nounwind {
;
; RV32IM-LABEL: muli64_p3840:
; RV32IM: # %bb.0:
-; RV32IM-NEXT: lui a2, 1
-; RV32IM-NEXT: addi a2, a2, -256
+; RV32IM-NEXT: li a2, 15
+; RV32IM-NEXT: slli a2, a2, 8
; RV32IM-NEXT: mul a1, a1, a2
; RV32IM-NEXT: mulhu a3, a0, a2
; RV32IM-NEXT: add a1, a3, a1
@@ -1020,8 +1020,8 @@ define i64 @muli64_p3840(i64 %a) nounwind {
;
; RV64IM-LABEL: muli64_p3840:
; RV64IM: # %bb.0:
-; RV64IM-NEXT: lui a1, 1
-; RV64IM-NEXT: addiw a1, a1, -256
+; RV64IM-NEXT: li a1, 15
+; RV64IM-NEXT: slli a1, a1, 8
; RV64IM-NEXT: mul a0, a0, a1
; RV64IM-NEXT: ret
%1 = mul i64 %a, 3840
@@ -1033,8 +1033,8 @@ define i64 @muli64_m4352(i64 %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lui a2, 1048575
-; RV32I-NEXT: addi a2, a2, -256
+; RV32I-NEXT: li a2, -17
+; RV32I-NEXT: slli a2, a2, 8
; RV32I-NEXT: li a3, -1
; RV32I-NEXT: call __muldi3@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -1043,8 +1043,8 @@ define i64 @muli64_m4352(i64 %a) nounwind {
;
; RV32IM-LABEL: muli64_m4352:
; RV32IM: # %bb.0:
-; RV32IM-NEXT: lui a2, 1048575
-; RV32IM-NEXT: addi a2, a2, -256
+; RV32IM-NEXT: li a2, -17
+; RV32IM-NEXT: slli a2, a2, 8
; RV32IM-NEXT: mul a1, a1, a2
; RV32IM-NEXT: mulhu a3, a0, a2
; RV32IM-NEXT: sub a3, a3, a0
@@ -1054,14 +1054,14 @@ define i64 @muli64_m4352(i64 %a) nounwind {
;
; RV64I-LABEL: muli64_m4352:
; RV64I: # %bb.0:
-; RV64I-NEXT: lui a1, 1048575
-; RV64I-NEXT: addiw a1, a1, -256
+; RV64I-NEXT: li a1, -17
+; RV64I-NEXT: slli a1, a1, 8
; RV64I-NEXT: tail __muldi3@plt
;
; RV64IM-LABEL: muli64_m4352:
; RV64IM: # %bb.0:
-; RV64IM-NEXT: lui a1, 1048575
-; RV64IM-NEXT: addiw a1, a1, -256
+; RV64IM-NEXT: li a1, -17
+; RV64IM-NEXT: slli a1, a1, 8
; RV64IM-NEXT: mul a0, a0, a1
; RV64IM-NEXT: ret
%1 = mul i64 %a, -4352
@@ -1087,8 +1087,8 @@ define i64 @muli64_m3840(i64 %a) nounwind {
;
; RV32IM-LABEL: muli64_m3840:
; RV32IM: # %bb.0:
-; RV32IM-NEXT: lui a2, 1048575
-; RV32IM-NEXT: addi a2, a2, 256
+; RV32IM-NEXT: li a2, -15
+; RV32IM-NEXT: slli a2, a2, 8
; RV32IM-NEXT: mul a1, a1, a2
; RV32IM-NEXT: mulhu a3, a0, a2
; RV32IM-NEXT: sub a3, a3, a0
@@ -1105,8 +1105,8 @@ define i64 @muli64_m3840(i64 %a) nounwind {
;
; RV64IM-LABEL: muli64_m3840:
; RV64IM: # %bb.0:
-; RV64IM-NEXT: lui a1, 1048575
-; RV64IM-NEXT: addiw a1, a1, 256
+; RV64IM-NEXT: li a1, -15
+; RV64IM-NEXT: slli a1, a1, 8
; RV64IM-NEXT: mul a0, a0, a1
; RV64IM-NEXT: ret
%1 = mul i64 %a, -3840
@@ -1171,8 +1171,8 @@ define i128 @muli128_m3840(i128 %a) nounwind {
; RV32IM-NEXT: lw a3, 8(a1)
; RV32IM-NEXT: lw a4, 0(a1)
; RV32IM-NEXT: lw a1, 4(a1)
-; RV32IM-NEXT: lui a5, 1048575
-; RV32IM-NEXT: addi a5, a5, 256
+; RV32IM-NEXT: li a5, -15
+; RV32IM-NEXT: slli a5, a5, 8
; RV32IM-NEXT: mulhu a6, a4, a5
; RV32IM-NEXT: mul a7, a1, a5
; RV32IM-NEXT: add a6, a7, a6
@@ -1236,8 +1236,8 @@ define i128 @muli128_m3840(i128 %a) nounwind {
;
; RV64IM-LABEL: muli128_m3840:
; RV64IM: # %bb.0:
-; RV64IM-NEXT: lui a2, 1048575
-; RV64IM-NEXT: addiw a2, a2, 256
+; RV64IM-NEXT: li a2, -15
+; RV64IM-NEXT: slli a2, a2, 8
; RV64IM-NEXT: mul a1, a1, a2
; RV64IM-NEXT: mulhu a3, a0, a2
; RV64IM-NEXT: sub a3, a3, a0
diff --git a/llvm/test/CodeGen/RISCV/pr58511.ll b/llvm/test/CodeGen/RISCV/pr58511.ll
index ed520af4645e5..b132f98d57920 100644
--- a/llvm/test/CodeGen/RISCV/pr58511.ll
+++ b/llvm/test/CodeGen/RISCV/pr58511.ll
@@ -9,8 +9,8 @@ define i32 @f(i1 %0, i32 %1, ptr %2) {
; CHECK-NEXT: subw a1, a1, a3
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: neg a0, a0
-; CHECK-NEXT: lui a3, 1
-; CHECK-NEXT: addiw a3, a3, -2048
+; CHECK-NEXT: li a3, 1
+; CHECK-NEXT: slli a3, a3, 11
; CHECK-NEXT: or a0, a0, a3
; CHECK-NEXT: sw a1, 0(a2)
; CHECK-NEXT: ret
@@ -30,8 +30,8 @@ define i32 @g(i1 %0, i32 %1, ptr %2) {
; CHECK-NEXT: subw a1, a1, a3
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: lui a3, 1
-; CHECK-NEXT: addiw a3, a3, -2048
+; CHECK-NEXT: li a3, 1
+; CHECK-NEXT: slli a3, a3, 11
; CHECK-NEXT: or a0, a0, a3
; CHECK-NEXT: sw a1, 0(a2)
; CHECK-NEXT: ret
@@ -69,8 +69,8 @@ define i32 @i(i1 %0, i32 %1, ptr %2) {
; CHECK-NEXT: slliw a1, a1, 12
; CHECK-NEXT: subw a1, a1, a3
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: lui a3, 1
-; CHECK-NEXT: addiw a3, a3, -2048
+; CHECK-NEXT: li a3, 1
+; CHECK-NEXT: slli a3, a3, 11
; CHECK-NEXT: and a0, a0, a3
; CHECK-NEXT: sw a1, 0(a2)
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rv32zbs.ll b/llvm/test/CodeGen/RISCV/rv32zbs.ll
index a57f4964f430f..b8b00a2916295 100644
--- a/llvm/test/CodeGen/RISCV/rv32zbs.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zbs.ll
@@ -512,8 +512,8 @@ define i32 @bseti_i32_10(i32 %a) nounwind {
define i32 @bseti_i32_11(i32 %a) nounwind {
; RV32I-LABEL: bseti_i32_11:
; RV32I: # %bb.0:
-; RV32I-NEXT: lui a1, 1
-; RV32I-NEXT: addi a1, a1, -2048
+; RV32I-NEXT: li a1, 1
+; RV32I-NEXT: slli a1, a1, 11
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: ret
;
@@ -567,8 +567,8 @@ define i32 @binvi_i32_10(i32 %a) nounwind {
define i32 @binvi_i32_11(i32 %a) nounwind {
; RV32I-LABEL: binvi_i32_11:
; RV32I: # %bb.0:
-; RV32I-NEXT: lui a1, 1
-; RV32I-NEXT: addi a1, a1, -2048
+; RV32I-NEXT: li a1, 1
+; RV32I-NEXT: slli a1, a1, 11
; RV32I-NEXT: xor a0, a0, a1
; RV32I-NEXT: ret
;
diff --git a/llvm/test/CodeGen/RISCV/rv64zbs.ll b/llvm/test/CodeGen/RISCV/rv64zbs.ll
index 19ea17ba69862..39fac4c3e5676 100644
--- a/llvm/test/CodeGen/RISCV/rv64zbs.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbs.ll
@@ -684,8 +684,8 @@ define signext i32 @bseti_i32_10(i32 signext %a) nounwind {
define signext i32 @bseti_i32_11(i32 signext %a) nounwind {
; RV64I-LABEL: bseti_i32_11:
; RV64I: # %bb.0:
-; RV64I-NEXT: lui a1, 1
-; RV64I-NEXT: addiw a1, a1, -2048
+; RV64I-NEXT: li a1, 1
+; RV64I-NEXT: slli a1, a1, 11
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: ret
;
@@ -734,8 +734,8 @@ define i64 @bseti_i64_10(i64 %a) nounwind {
define i64 @bseti_i64_11(i64 %a) nounwind {
; RV64I-LABEL: bseti_i64_11:
; RV64I: # %bb.0:
-; RV64I-NEXT: lui a1, 1
-; RV64I-NEXT: addiw a1, a1, -2048
+; RV64I-NEXT: li a1, 1
+; RV64I-NEXT: slli a1, a1, 11
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: ret
;
@@ -822,8 +822,8 @@ define signext i32 @binvi_i32_10(i32 signext %a) nounwind {
define signext i32 @binvi_i32_11(i32 signext %a) nounwind {
; RV64I-LABEL: binvi_i32_11:
; RV64I: # %bb.0:
-; RV64I-NEXT: lui a1, 1
-; RV64I-NEXT: addiw a1, a1, -2048
+; RV64I-NEXT: li a1, 1
+; RV64I-NEXT: slli a1, a1, 11
; RV64I-NEXT: xor a0, a0, a1
; RV64I-NEXT: ret
;
@@ -872,8 +872,8 @@ define i64 @binvi_i64_10(i64 %a) nounwind {
define i64 @binvi_i64_11(i64 %a) nounwind {
; RV64I-LABEL: binvi_i64_11:
; RV64I: # %bb.0:
-; RV64I-NEXT: lui a1, 1
-; RV64I-NEXT: addiw a1, a1, -2048
+; RV64I-NEXT: li a1, 1
+; RV64I-NEXT: slli a1, a1, 11
; RV64I-NEXT: xor a0, a0, a1
; RV64I-NEXT: ret
;
diff --git a/llvm/test/CodeGen/RISCV/rvv/active_lane_mask.ll b/llvm/test/CodeGen/RISCV/rvv/active_lane_mask.ll
index c8e26cea07269..0174ca0796f08 100644
--- a/llvm/test/CodeGen/RISCV/rvv/active_lane_mask.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/active_lane_mask.ll
@@ -68,8 +68,8 @@ define <vscale x 1 x i1> @above_maxvl(ptr %p) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT: vid.v v8
-; CHECK-NEXT: lui a0, 1
-; CHECK-NEXT: addiw a0, a0, -2048
+; CHECK-NEXT: li a0, 1
+; CHECK-NEXT: slli a0, a0, 11
; CHECK-NEXT: vmsltu.vx v0, v8, a0
; CHECK-NEXT: ret
%mask = call <vscale x 1 x i1> @llvm.get.active.lane.mask.nxv1i1.i64(i64 0, i64 2048)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
index 49732432b700f..8dee813906505 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
@@ -238,65 +238,35 @@ define <4 x i64> @buildvec_vid_step2_add0_v4i64() {
}
define void @buildvec_no_vid_v4i8(<4 x i8>* %z0, <4 x i8>* %z1, <4 x i8>* %z2, <4 x i8>* %z3, <4 x i8>* %z4, <4 x i8>* %z5) {
-; RV32-LABEL: buildvec_no_vid_v4i8:
-; RV32: # %bb.0:
-; RV32-NEXT: lui a6, %hi(.LCPI14_0)
-; RV32-NEXT: addi a6, a6, %lo(.LCPI14_0)
-; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; RV32-NEXT: vle8.v v8, (a6)
-; RV32-NEXT: lui a6, %hi(.LCPI14_1)
-; RV32-NEXT: addi a6, a6, %lo(.LCPI14_1)
-; RV32-NEXT: vle8.v v9, (a6)
-; RV32-NEXT: vse8.v v8, (a0)
-; RV32-NEXT: vse8.v v9, (a1)
-; RV32-NEXT: lui a0, 1
-; RV32-NEXT: addi a0, a0, -2048
-; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; RV32-NEXT: vse8.v v8, (a2)
-; RV32-NEXT: li a0, 2047
-; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; RV32-NEXT: lui a0, %hi(.LCPI14_2)
-; RV32-NEXT: addi a0, a0, %lo(.LCPI14_2)
-; RV32-NEXT: vle8.v v9, (a0)
-; RV32-NEXT: vse8.v v8, (a3)
-; RV32-NEXT: vmv.v.i v8, -2
-; RV32-NEXT: vse8.v v8, (a4)
-; RV32-NEXT: vse8.v v9, (a5)
-; RV32-NEXT: ret
-;
-; RV64-LABEL: buildvec_no_vid_v4i8:
-; RV64: # %bb.0:
-; RV64-NEXT: lui a6, %hi(.LCPI14_0)
-; RV64-NEXT: addi a6, a6, %lo(.LCPI14_0)
-; RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; RV64-NEXT: vle8.v v8, (a6)
-; RV64-NEXT: lui a6, %hi(.LCPI14_1)
-; RV64-NEXT: addi a6, a6, %lo(.LCPI14_1)
-; RV64-NEXT: vle8.v v9, (a6)
-; RV64-NEXT: vse8.v v8, (a0)
-; RV64-NEXT: vse8.v v9, (a1)
-; RV64-NEXT: lui a0, 1
-; RV64-NEXT: addiw a0, a0, -2048
-; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; RV64-NEXT: vse8.v v8, (a2)
-; RV64-NEXT: li a0, 2047
-; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; RV64-NEXT: lui a0, %hi(.LCPI14_2)
-; RV64-NEXT: addi a0, a0, %lo(.LCPI14_2)
-; RV64-NEXT: vle8.v v9, (a0)
-; RV64-NEXT: vse8.v v8, (a3)
-; RV64-NEXT: vmv.v.i v8, -2
-; RV64-NEXT: vse8.v v8, (a4)
-; RV64-NEXT: vse8.v v9, (a5)
-; RV64-NEXT: ret
+; CHECK-LABEL: buildvec_no_vid_v4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lui a6, %hi(.LCPI14_0)
+; CHECK-NEXT: addi a6, a6, %lo(.LCPI14_0)
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT: vle8.v v8, (a6)
+; CHECK-NEXT: lui a6, %hi(.LCPI14_1)
+; CHECK-NEXT: addi a6, a6, %lo(.LCPI14_1)
+; CHECK-NEXT: vle8.v v9, (a6)
+; CHECK-NEXT: vse8.v v8, (a0)
+; CHECK-NEXT: vse8.v v9, (a1)
+; CHECK-NEXT: li a0, 1
+; CHECK-NEXT: slli a0, a0, 11
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vmv.v.x v8, a0
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT: vse8.v v8, (a2)
+; CHECK-NEXT: li a0, 2047
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vmv.v.x v8, a0
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT: lui a0, %hi(.LCPI14_2)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI14_2)
+; CHECK-NEXT: vle8.v v9, (a0)
+; CHECK-NEXT: vse8.v v8, (a3)
+; CHECK-NEXT: vmv.v.i v8, -2
+; CHECK-NEXT: vse8.v v8, (a4)
+; CHECK-NEXT: vse8.v v9, (a5)
+; CHECK-NEXT: ret
store <4 x i8> <i8 1, i8 3, i8 6, i8 7>, <4 x i8>* %z0
store <4 x i8> <i8 undef, i8 2, i8 5, i8 7>, <4 x i8>* %z1
store <4 x i8> <i8 0, i8 undef, i8 undef, i8 8>, <4 x i8>* %z2
diff --git a/llvm/test/CodeGen/RISCV/rvv/large-rvv-stack-size.mir b/llvm/test/CodeGen/RISCV/rvv/large-rvv-stack-size.mir
index b17fac35e7206..f757e22e89cc9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/large-rvv-stack-size.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/large-rvv-stack-size.mir
@@ -20,8 +20,8 @@
; CHECK-NEXT: sd a0, 8(sp)
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: sd a1, 0(sp)
- ; CHECK-NEXT: lui a1, 1
- ; CHECK-NEXT: addiw a1, a1, -1024
+ ; CHECK-NEXT: li a1, 3
+ ; CHECK-NEXT: slli a1, a1, 10
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: ld a1, 0(sp)
; CHECK-NEXT: sub sp, sp, a0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vscale-power-of-two.ll b/llvm/test/CodeGen/RISCV/rvv/vscale-power-of-two.ll
index 919f33a2b18c4..3110b289b3203 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vscale-power-of-two.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vscale-power-of-two.ll
@@ -93,8 +93,8 @@ define i64 @con2048_minus_rem() {
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 3
; CHECK-NEXT: neg a0, a0
-; CHECK-NEXT: lui a1, 1
-; CHECK-NEXT: addiw a1, a1, -2048
+; CHECK-NEXT: li a1, 1
+; CHECK-NEXT: slli a1, a1, 11
; CHECK-NEXT: and a0, a0, a1
; CHECK-NEXT: ret
%vscale = call i64 @llvm.vscale.i64()
diff --git a/llvm/test/MC/RISCV/rv32c-aliases-valid.s b/llvm/test/MC/RISCV/rv32c-aliases-valid.s
index a0a1e8199ab0f..f688b27034424 100644
--- a/llvm/test/MC/RISCV/rv32c-aliases-valid.s
+++ b/llvm/test/MC/RISCV/rv32c-aliases-valid.s
@@ -24,8 +24,8 @@ li x10, -1
li x10, 2047
# CHECK-EXPAND: addi a0, zero, -2047
li x10, -2047
-# CHECK-EXPAND: c.lui a1, 1
-# CHECK-EXPAND: addi a1, a1, -2048
+# CHECK-EXPAND: c.li a1, 1
+# CHECK-EXPAND: c.slli a1, 11
li x11, 2048
# CHECK-EXPAND: addi a1, zero, -2048
li x11, -2048
diff --git a/llvm/test/MC/RISCV/rv32i-aliases-valid.s b/llvm/test/MC/RISCV/rv32i-aliases-valid.s
index 37d63135c1385..fb4004d9fe75b 100644
--- a/llvm/test/MC/RISCV/rv32i-aliases-valid.s
+++ b/llvm/test/MC/RISCV/rv32i-aliases-valid.s
@@ -32,8 +32,10 @@ li x10, 2047
# CHECK-INST: addi a0, zero, -2047
# CHECK-ALIAS: li a0, -2047
li x10, -2047
-# CHECK-EXPAND: lui a1, 1
-# CHECK-EXPAND: addi a1, a1, -2048
+# CHECK-INST: addi a1, zero, 1
+# CHECK-INST: slli a1, a1, 11
+# CHECK-ALIAS: li a1, 1
+# CHECK-ALIAS: slli a1, a1, 11
li x11, 2048
# CHECK-INST: addi a1, zero, -2048
# CHECK-ALIAS: li a1, -2048
diff --git a/llvm/test/MC/RISCV/rv64c-aliases-valid.s b/llvm/test/MC/RISCV/rv64c-aliases-valid.s
index 5b5dacee9b81d..da38e6d1c4499 100644
--- a/llvm/test/MC/RISCV/rv64c-aliases-valid.s
+++ b/llvm/test/MC/RISCV/rv64c-aliases-valid.s
@@ -24,8 +24,8 @@ li x10, -1
li x10, 2047
# CHECK-EXPAND: addi a0, zero, -2047
li x10, -2047
-# CHECK-EXPAND: c.lui a1, 1
-# CHECK-EXPAND: addiw a1, a1, -2048
+# CHECK-EXPAND: c.li a1, 1
+# CHECK-EXPAND: c.slli a1, 11
li x11, 2048
# CHECK-EXPAND: addi a1, zero, -2048
li x11, -2048
diff --git a/llvm/test/MC/RISCV/rv64i-aliases-valid.s b/llvm/test/MC/RISCV/rv64i-aliases-valid.s
index ac72c812f67e4..acdc083135554 100644
--- a/llvm/test/MC/RISCV/rv64i-aliases-valid.s
+++ b/llvm/test/MC/RISCV/rv64i-aliases-valid.s
@@ -35,8 +35,10 @@ li x10, 2047
# CHECK-INST: addi a0, zero, -2047
# CHECK-ALIAS: li a0, -2047
li x10, -2047
-# CHECK-EXPAND: lui a1, 1
-# CHECK-EXPAND: addiw a1, a1, -2048
+# CHECK-INST: addi a1, zero, 1
+# CHECK-INST: slli a1, a1, 11
+# CHECK-ALIAS: li a1, 1
+# CHECK-ALIAS: slli a1, a1, 11
li x11, 2048
# CHECK-INST: addi a1, zero, -2048
# CHECK-ALIAS: li a1, -2048
More information about the llvm-commits
mailing list