[llvm] [RISCV] Use addi rather than addiw for immediates materialised by lui+addi(w) pairs when possible (PR #141663)
via llvm-commits
llvm-commits at lists.llvm.org
Tue May 27 13:01:27 PDT 2025
llvmbot wrote:
@llvm/pr-subscribers-llvm-globalisel
Author: Alex Bradbury (asb)
The logic in RISCVMatInt would previously produce lui+addiw on RV64 whenever a 32-bit integer must be materialised and the Hi20 and Lo12 parts are non-zero. However, sometimes addi can be used equivalently (whenever the sign extension behaviour of addiw would be a no-op; see the standalone sketch after the list below). This patch moves to using addiw only when necessary. Although there is absolutely no advantage in terms of compressibility or performance, the change has the following benefits:
* It's more consistent with logic used elsewhere in the backend. For instance, RISCVOptWInstrs will try to convert addiw to addi on the basis that it reduces test diffs vs RV32.
* This matches the lowering GCC does in its codegen path. Unlike LLVM, GCC seems to have different expansion logic for the assembler vs codegen: for codegen it will use lui+addi if possible, but expanding `li` in the assembler always produces lui+addiw, as LLVM did prior to this commit. As someone who has been looking at a lot of gcc vs clang diffs lately, I think reducing unnecessary divergence is of at least some value.
* As the diff for fold-mem-offset.ll shows, it appears we can fold memory offsets in more cases when addi is used. Memory offset folding could be taught to recognise when an addiw could be replaced with an addi, but that seems unnecessary when we can simply change the logic in RISCVMatInt.
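To make the "no-op sign extension" condition concrete, here is a minimal standalone sketch of the check. This is not the in-tree code (the actual change to RISCVMatInt.cpp is in the diff below); the helper names here are illustrative only:

```cpp
// Sketch of the condition: after lui, the register holds a sign-extended
// multiple of 4096. addiw is only required when adding Lo12 moves the
// result outside the signed 32-bit range, i.e. when the 32-bit sign
// extension of the sum is not a no-op.
#include <cassert>
#include <cstdint>

static int64_t signExtend32(int64_t V) {
  return (int64_t)(int32_t)(uint32_t)V; // keep the low 32 bits, sign-extend
}

static bool needsAddiw(int64_t Hi20, int64_t Lo12) {
  int64_t LuiRes = signExtend32(Hi20 << 12); // what lui leaves in the register
  return LuiRes + Lo12 != signExtend32(LuiRes + Lo12);
}

int main() {
  assert(!needsAddiw(4, 232));       // 16616 (the deepsjeng case): addi is fine
  assert(!needsAddiw(0x80000, 648)); // -2147483000 (constant64.mir): addi is fine
  assert(needsAddiw(0x80000, -1));   // 2147483647 (INT32_MAX): addiw required
}
```

As far as I can tell, the condition only fires for Hi20 == 0x80000 (lui 524288, where the lui result is INT32_MIN) combined with a negative Lo12, which is why almost all of the test churn below is a mechanical addiw to addi rename.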
---
Just to underline again, there is no inherent advantage to addi vs addiw. The main itch I'd wanted to scratch was the second bullet point above (more closely matching gcc, after seeing the addi vs addiw difference in many comparisons). Trying this on the test suite, I found the changes are mostly minimal, but there is the occasional case where memory offset folding kicks in where it didn't before.
This is far from the norm, but one example from a function in deepsjeng is below. Note: I'm sharing this as a point of interest, as I hadn't expected any positive codegen changes. I haven't characterised how often this happens or the impact on runtime instruction count, since I don't think this patch is predicated on being a performance improvement; to my mind it's sufficiently motivated by the first two bullet points in the patch description above.
```llvm
; ModuleID = '<stdin>'
source_filename = "<stdin>"
target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n32:64-S128"
target triple = "riscv64-unknown-linux-gnu"
%struct.ham = type { i32, i32, [8 x %struct.zot], [8 x i32], [8 x %struct.quux] }
%struct.zot = type { i32, i32, i32 }
%struct.quux = type { i32, [64 x i32], i64, i64, i64, [13 x i64], i32, i32, [13 x i32], i32, i32, i32, i32, i32, i32, i32, i64, i64, [64 x %struct.wombat], [64 x i32], [64 x i32], [64 x %struct.wombat.0], i64, i64, i32, [64 x i32], i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, [1000 x i64] }
%struct.wombat = type { i32, i32, i32, i32, i64, i64 }
%struct.wombat.0 = type { i32, i32, i32, i32 }
@global = external global %struct.ham
define void @blam() {
bb:
  tail call void @llvm.memset.p0.i64(ptr getelementptr inbounds nuw (i8, ptr @global, i64 16616), i8 0, i64 16, i1 false)
  tail call void @llvm.memset.p0.i64(ptr getelementptr inbounds nuw (i8, ptr @global, i64 29016), i8 0, i64 16, i1 false)
  tail call void @llvm.memset.p0.i64(ptr getelementptr inbounds nuw (i8, ptr @global, i64 41416), i8 0, i64 16, i1 false)
  tail call void @llvm.memset.p0.i64(ptr getelementptr inbounds nuw (i8, ptr @global, i64 53816), i8 0, i64 16, i1 false)
  tail call void @llvm.memset.p0.i64(ptr getelementptr inbounds nuw (i8, ptr @global, i64 66216), i8 0, i64 16, i1 false)
  tail call void @llvm.memset.p0.i64(ptr getelementptr inbounds nuw (i8, ptr @global, i64 78616), i8 0, i64 16, i1 false)
  tail call void @llvm.memset.p0.i64(ptr getelementptr inbounds nuw (i8, ptr @global, i64 91016), i8 0, i64 16, i1 false)
  ret void
}
; Function Attrs: nocallback nofree nounwind willreturn memory(argmem: write)
declare void @llvm.memset.p0.i64(ptr writeonly captures(none), i8, i64, i1 immarg) #0
attributes #0 = { nocallback nofree nounwind willreturn memory(argmem: write) }
```
Before:
```
lui a0, %hi(global)
addi a0, a0, %lo(global)
lui a1, 4
lui a2, 7
lui a3, 10
lui a4, 13
lui a5, 16
lui a6, 19
lui a7, 22
addiw a1, a1, 232
addiw a2, a2, 344
addiw a3, a3, 456
addiw a4, a4, 568
addiw a5, a5, 680
addiw a6, a6, 792
addiw a7, a7, 904
add a1, a0, a1
add a2, a0, a2
add a3, a0, a3
add a4, a0, a4
add a5, a0, a5
add a6, a0, a6
add a0, a0, a7
sd zero, 0(a1)
sd zero, 8(a1)
sd zero, 0(a2)
sd zero, 8(a2)
sd zero, 0(a3)
sd zero, 8(a3)
sd zero, 0(a4)
sd zero, 8(a4)
sd zero, 0(a5)
sd zero, 8(a5)
sd zero, 0(a6)
sd zero, 8(a6)
sd zero, 0(a0)
sd zero, 8(a0)
ret
```
After:
```
lui a0, %hi(global)
addi a0, a0, %lo(global)
lui a1, 4
lui a2, 7
lui a3, 10
lui a4, 13
lui a5, 16
lui a6, 19
lui a7, 22
add a1, a0, a1
add a2, a0, a2
add a3, a0, a3
add a4, a0, a4
add a5, a0, a5
add a6, a0, a6
add a0, a0, a7
sd zero, 232(a1)
sd zero, 240(a1)
sd zero, 344(a2)
sd zero, 352(a2)
sd zero, 456(a3)
sd zero, 464(a3)
sd zero, 568(a4)
sd zero, 576(a4)
sd zero, 680(a5)
sd zero, 688(a5)
sd zero, 792(a6)
sd zero, 800(a6)
sd zero, 904(a0)
sd zero, 912(a0)
ret
```
---
Patch is 463.58 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/141663.diff
150 Files Affected:
- (modified) llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp (+9-1)
- (modified) llvm/test/CodeGen/RISCV/GlobalISel/alu-roundtrip.ll (+1-1)
- (modified) llvm/test/CodeGen/RISCV/GlobalISel/bitmanip.ll (+4-4)
- (modified) llvm/test/CodeGen/RISCV/GlobalISel/div-by-constant.ll (+7-7)
- (modified) llvm/test/CodeGen/RISCV/GlobalISel/float-intrinsics.ll (+1-1)
- (modified) llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/constant64.mir (+2-2)
- (modified) llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/fp-constant-f16.mir (+8-15)
- (modified) llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/fp-constant.mir (+14-21)
- (modified) llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb.ll (+23-23)
- (modified) llvm/test/CodeGen/RISCV/GlobalISel/rv64zbkb.ll (+4-4)
- (modified) llvm/test/CodeGen/RISCV/GlobalISel/vararg.ll (+8-8)
- (modified) llvm/test/CodeGen/RISCV/abdu-neg.ll (+34-68)
- (modified) llvm/test/CodeGen/RISCV/abdu.ll (+5-5)
- (modified) llvm/test/CodeGen/RISCV/addimm-mulimm.ll (+5-5)
- (modified) llvm/test/CodeGen/RISCV/alu16.ll (+1-1)
- (modified) llvm/test/CodeGen/RISCV/atomic-rmw.ll (+15-15)
- (modified) llvm/test/CodeGen/RISCV/atomic-signext.ll (+2-2)
- (modified) llvm/test/CodeGen/RISCV/atomicrmw-cond-sub-clamp.ll (+4-4)
- (modified) llvm/test/CodeGen/RISCV/atomicrmw-uinc-udec-wrap.ll (+4-4)
- (modified) llvm/test/CodeGen/RISCV/avgceilu.ll (+2-2)
- (modified) llvm/test/CodeGen/RISCV/avgflooru.ll (+2-2)
- (modified) llvm/test/CodeGen/RISCV/bittest.ll (+21-35)
- (modified) llvm/test/CodeGen/RISCV/branch-relaxation-rv64.ll (+7-7)
- (modified) llvm/test/CodeGen/RISCV/bswap-bitreverse.ll (+58-58)
- (modified) llvm/test/CodeGen/RISCV/calling-conv-half.ll (+2-2)
- (modified) llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll (+1-1)
- (modified) llvm/test/CodeGen/RISCV/codemodel-lowering.ll (+3-3)
- (modified) llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll (+50-50)
- (modified) llvm/test/CodeGen/RISCV/ctz_zero_return_test.ll (+3-3)
- (modified) llvm/test/CodeGen/RISCV/div-by-constant.ll (+16-16)
- (modified) llvm/test/CodeGen/RISCV/div.ll (+4-4)
- (modified) llvm/test/CodeGen/RISCV/double-convert.ll (+1-1)
- (modified) llvm/test/CodeGen/RISCV/float-convert.ll (+11-11)
- (modified) llvm/test/CodeGen/RISCV/float-imm.ll (+9-13)
- (modified) llvm/test/CodeGen/RISCV/float-intrinsics.ll (+3-3)
- (modified) llvm/test/CodeGen/RISCV/fold-addi-loadstore.ll (+3-3)
- (modified) llvm/test/CodeGen/RISCV/fold-mem-offset.ll (+2-3)
- (modified) llvm/test/CodeGen/RISCV/fpclamptosat.ll (+24-24)
- (modified) llvm/test/CodeGen/RISCV/fpenv.ll (+3-3)
- (modified) llvm/test/CodeGen/RISCV/half-arith.ll (+22-22)
- (modified) llvm/test/CodeGen/RISCV/half-convert.ll (+29-29)
- (modified) llvm/test/CodeGen/RISCV/half-imm.ll (+10-14)
- (modified) llvm/test/CodeGen/RISCV/half-intrinsics.ll (+9-9)
- (modified) llvm/test/CodeGen/RISCV/i64-icmp.ll (+11-11)
- (modified) llvm/test/CodeGen/RISCV/imm.ll (+174-174)
- (modified) llvm/test/CodeGen/RISCV/inline-asm-mem-constraint.ll (+9-9)
- (modified) llvm/test/CodeGen/RISCV/lack-of-signed-truncation-check.ll (+1-1)
- (modified) llvm/test/CodeGen/RISCV/local-stack-slot-allocation.ll (+3-3)
- (modified) llvm/test/CodeGen/RISCV/loop-strength-reduce-add-cheaper-than-mul.ll (+1-1)
- (modified) llvm/test/CodeGen/RISCV/macro-fusion-lui-addi.ll (+7-7)
- (modified) llvm/test/CodeGen/RISCV/memset-inline.ll (+8-8)
- (modified) llvm/test/CodeGen/RISCV/narrow-shl-cst.ll (+2-2)
- (modified) llvm/test/CodeGen/RISCV/out-of-reach-emergency-slot.mir (+5-5)
- (modified) llvm/test/CodeGen/RISCV/overflow-intrinsics.ll (+1-1)
- (modified) llvm/test/CodeGen/RISCV/pr135206.ll (+4-4)
- (modified) llvm/test/CodeGen/RISCV/pr56457.ll (+4-4)
- (modified) llvm/test/CodeGen/RISCV/pr58286.ll (+12-12)
- (modified) llvm/test/CodeGen/RISCV/pr58511.ll (+4-4)
- (modified) llvm/test/CodeGen/RISCV/pr68855.ll (+1-1)
- (modified) llvm/test/CodeGen/RISCV/pr69586.ll (+66-66)
- (modified) llvm/test/CodeGen/RISCV/pr90730.ll (+1-1)
- (modified) llvm/test/CodeGen/RISCV/pr95271.ll (+2-2)
- (modified) llvm/test/CodeGen/RISCV/prefer-w-inst.ll (+1-1)
- (modified) llvm/test/CodeGen/RISCV/prefetch.ll (+10-10)
- (modified) llvm/test/CodeGen/RISCV/prolog-epilogue.ll (+6-6)
- (modified) llvm/test/CodeGen/RISCV/rem.ll (+1-1)
- (modified) llvm/test/CodeGen/RISCV/rv32i-rv64i-half.ll (+1-1)
- (modified) llvm/test/CodeGen/RISCV/rv64-float-convert.ll (+4-4)
- (modified) llvm/test/CodeGen/RISCV/rv64-half-convert.ll (+4-4)
- (modified) llvm/test/CodeGen/RISCV/rv64-patchpoint.ll (+2-2)
- (modified) llvm/test/CodeGen/RISCV/rv64xtheadba.ll (+4-4)
- (modified) llvm/test/CodeGen/RISCV/rv64xtheadbb.ll (+15-15)
- (modified) llvm/test/CodeGen/RISCV/rv64zba.ll (+5-5)
- (modified) llvm/test/CodeGen/RISCV/rv64zbb-intrinsic.ll (+1-1)
- (modified) llvm/test/CodeGen/RISCV/rv64zbb.ll (+39-39)
- (modified) llvm/test/CodeGen/RISCV/rv64zbkb.ll (+1-1)
- (modified) llvm/test/CodeGen/RISCV/rv64zbs.ll (+11-11)
- (modified) llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll (+16-16)
- (modified) llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll (+40-40)
- (modified) llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll (+4-4)
- (modified) llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll (+11-11)
- (modified) llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll (+32-32)
- (modified) llvm/test/CodeGen/RISCV/rvv/ctpop-sdnode.ll (+16-16)
- (modified) llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll (+48-48)
- (modified) llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll (+32-32)
- (modified) llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll (+48-48)
- (modified) llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll (+2-2)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll (+40-40)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse.ll (+8-8)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap-vp.ll (+10-10)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap.ll (+2-2)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz-vp.ll (+96-96)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll (+16-16)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll (+48-48)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop.ll (+8-8)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz-vp.ll (+96-96)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll (+16-16)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll (+2-2)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll (+1-1)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll (+5-5)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll (+1-1)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-sat-clip.ll (+2-2)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zvqdotq.ll (+25-46)
- (modified) llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll (+18-18)
- (modified) llvm/test/CodeGen/RISCV/rvv/frm-insert.ll (+4-4)
- (modified) llvm/test/CodeGen/RISCV/rvv/memset-inline.ll (+2-2)
- (modified) llvm/test/CodeGen/RISCV/rvv/pr88799.ll (+1-1)
- (modified) llvm/test/CodeGen/RISCV/rvv/stack-probing-dynamic.ll (+1-1)
- (modified) llvm/test/CodeGen/RISCV/rvv/stack-probing-rvv.ll (+1-1)
- (modified) llvm/test/CodeGen/RISCV/rvv/stepvector.ll (+2-2)
- (modified) llvm/test/CodeGen/RISCV/rvv/trunc-sat-clip-sdnode.ll (+1-1)
- (modified) llvm/test/CodeGen/RISCV/rvv/vsetvl-cross-inline-asm.ll (+1-1)
- (modified) llvm/test/CodeGen/RISCV/rvv/vsplats-i64.ll (+7-15)
- (modified) llvm/test/CodeGen/RISCV/rvv/zvqdotq-sdnode.ll (+25-46)
- (modified) llvm/test/CodeGen/RISCV/sadd_sat.ll (+2-2)
- (modified) llvm/test/CodeGen/RISCV/sadd_sat_plus.ll (+2-2)
- (modified) llvm/test/CodeGen/RISCV/select-cc.ll (+2-2)
- (modified) llvm/test/CodeGen/RISCV/select-const.ll (+2-2)
- (modified) llvm/test/CodeGen/RISCV/select.ll (+35-89)
- (modified) llvm/test/CodeGen/RISCV/sextw-removal.ll (+6-6)
- (modified) llvm/test/CodeGen/RISCV/shl-cttz.ll (+4-4)
- (modified) llvm/test/CodeGen/RISCV/shlimm-addimm.ll (+5-5)
- (modified) llvm/test/CodeGen/RISCV/signed-truncation-check.ll (+3-3)
- (modified) llvm/test/CodeGen/RISCV/split-offsets.ll (+11-14)
- (modified) llvm/test/CodeGen/RISCV/split-udiv-by-constant.ll (+12-12)
- (modified) llvm/test/CodeGen/RISCV/split-urem-by-constant.ll (+9-9)
- (modified) llvm/test/CodeGen/RISCV/srem-lkk.ll (+6-6)
- (modified) llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll (+4-4)
- (modified) llvm/test/CodeGen/RISCV/srem-vector-lkk.ll (+4-4)
- (modified) llvm/test/CodeGen/RISCV/ssub_sat.ll (+2-2)
- (modified) llvm/test/CodeGen/RISCV/ssub_sat_plus.ll (+2-2)
- (modified) llvm/test/CodeGen/RISCV/stack-clash-prologue-nounwind.ll (+9-9)
- (modified) llvm/test/CodeGen/RISCV/stack-clash-prologue.ll (+9-9)
- (modified) llvm/test/CodeGen/RISCV/stack-inst-compress.mir (+4-4)
- (modified) llvm/test/CodeGen/RISCV/stack-offset.ll (+10-10)
- (modified) llvm/test/CodeGen/RISCV/stack-realignment.ll (+2-2)
- (modified) llvm/test/CodeGen/RISCV/switch-width.ll (+3-3)
- (modified) llvm/test/CodeGen/RISCV/trunc-nsw-nuw.ll (+1-1)
- (modified) llvm/test/CodeGen/RISCV/uadd_sat.ll (+2-2)
- (modified) llvm/test/CodeGen/RISCV/uadd_sat_plus.ll (+2-2)
- (modified) llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll (+3-3)
- (modified) llvm/test/CodeGen/RISCV/urem-vector-lkk.ll (+3-3)
- (modified) llvm/test/CodeGen/RISCV/usub_sat_plus.ll (+1-1)
- (modified) llvm/test/CodeGen/RISCV/vararg.ll (+14-14)
- (modified) llvm/test/CodeGen/RISCV/varargs-with-fp-and-second-adj.ll (+1-1)
- (modified) llvm/test/CodeGen/RISCV/zbb-logic-neg-imm.ll (+7-14)
- (modified) llvm/test/MC/RISCV/rv64c-aliases-valid.s (+9-9)
- (modified) llvm/test/MC/RISCV/rv64i-aliases-valid.s (+29-29)
- (modified) llvm/test/MC/RISCV/rv64zba-aliases-valid.s (+16-16)
- (modified) llvm/test/MC/RISCV/rv64zbs-aliases-valid.s (+4-4)
``````````diff
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp
index 8ea2548258fdb..c14361e988de6 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp
@@ -94,7 +94,15 @@ static void generateInstSeqImpl(int64_t Val, const MCSubtargetInfo &STI,
Res.emplace_back(RISCV::LUI, Hi20);
if (Lo12 || Hi20 == 0) {
- unsigned AddiOpc = (IsRV64 && Hi20) ? RISCV::ADDIW : RISCV::ADDI;
+ unsigned AddiOpc = RISCV::ADDI;
+ if (IsRV64 && Hi20) {
+ // Use ADDIW rather than ADDI only when necessary for correctness. As
+ // noted in RISCVOptWInstrs, this helps reduce test differences vs
+ // RV32 without being a pessimization.
+ int64_t LuiRes = SignExtend64<32>(Hi20 << 12);
+ if (LuiRes + Lo12 != SignExtend64<32>(LuiRes + Lo12))
+ AddiOpc = RISCV::ADDIW;
+ }
Res.emplace_back(AddiOpc, Lo12);
}
return;
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/alu-roundtrip.ll b/llvm/test/CodeGen/RISCV/GlobalISel/alu-roundtrip.ll
index 1632f92e96b50..487cb5768dcad 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/alu-roundtrip.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/alu-roundtrip.ll
@@ -468,7 +468,7 @@ define i64 @subi_i64(i64 %a) {
; RV64IM-LABEL: subi_i64:
; RV64IM: # %bb.0: # %entry
; RV64IM-NEXT: lui a1, 1048275
-; RV64IM-NEXT: addiw a1, a1, -1548
+; RV64IM-NEXT: addi a1, a1, -1548
; RV64IM-NEXT: add a0, a0, a1
; RV64IM-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/bitmanip.ll b/llvm/test/CodeGen/RISCV/GlobalISel/bitmanip.ll
index bce6dfacf8e82..68bc1e5db6095 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/bitmanip.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/bitmanip.ll
@@ -174,8 +174,8 @@ define i24 @bitreverse_i24(i24 %x) {
; RV64-NEXT: slli a1, a0, 16
; RV64-NEXT: lui a2, 4096
; RV64-NEXT: lui a3, 1048335
-; RV64-NEXT: addiw a2, a2, -1
-; RV64-NEXT: addiw a3, a3, 240
+; RV64-NEXT: addi a2, a2, -1
+; RV64-NEXT: addi a3, a3, 240
; RV64-NEXT: and a0, a0, a2
; RV64-NEXT: srli a0, a0, 16
; RV64-NEXT: or a0, a0, a1
@@ -184,7 +184,7 @@ define i24 @bitreverse_i24(i24 %x) {
; RV64-NEXT: slli a0, a0, 4
; RV64-NEXT: and a0, a0, a3
; RV64-NEXT: lui a3, 1047757
-; RV64-NEXT: addiw a3, a3, -820
+; RV64-NEXT: addi a3, a3, -820
; RV64-NEXT: srli a1, a1, 4
; RV64-NEXT: or a0, a1, a0
; RV64-NEXT: and a1, a3, a2
@@ -192,7 +192,7 @@ define i24 @bitreverse_i24(i24 %x) {
; RV64-NEXT: slli a0, a0, 2
; RV64-NEXT: and a0, a0, a3
; RV64-NEXT: lui a3, 1047211
-; RV64-NEXT: addiw a3, a3, -1366
+; RV64-NEXT: addi a3, a3, -1366
; RV64-NEXT: and a2, a3, a2
; RV64-NEXT: srli a1, a1, 2
; RV64-NEXT: or a0, a1, a0
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/div-by-constant.ll b/llvm/test/CodeGen/RISCV/GlobalISel/div-by-constant.ll
index 9c46e6792e8d8..94b8afcabbd52 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/div-by-constant.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/div-by-constant.ll
@@ -25,7 +25,7 @@ define i32 @udiv_constant_no_add(i32 %a) nounwind {
; RV64IM-NEXT: slli a0, a0, 32
; RV64IM-NEXT: lui a1, 205
; RV64IM-NEXT: srli a0, a0, 32
-; RV64IM-NEXT: addiw a1, a1, -819
+; RV64IM-NEXT: addi a1, a1, -819
; RV64IM-NEXT: slli a1, a1, 12
; RV64IM-NEXT: addi a1, a1, -819
; RV64IM-NEXT: mul a0, a0, a1
@@ -62,7 +62,7 @@ define i32 @udiv_constant_add(i32 %a) nounwind {
; RV64IM: # %bb.0:
; RV64IM-NEXT: lui a1, 149797
; RV64IM-NEXT: slli a2, a0, 32
-; RV64IM-NEXT: addiw a1, a1, -1755
+; RV64IM-NEXT: addi a1, a1, -1755
; RV64IM-NEXT: srli a2, a2, 32
; RV64IM-NEXT: mul a1, a2, a1
; RV64IM-NEXT: srli a1, a1, 32
@@ -75,7 +75,7 @@ define i32 @udiv_constant_add(i32 %a) nounwind {
; RV64IMZB-LABEL: udiv_constant_add:
; RV64IMZB: # %bb.0:
; RV64IMZB-NEXT: lui a1, 149797
-; RV64IMZB-NEXT: addiw a1, a1, -1755
+; RV64IMZB-NEXT: addi a1, a1, -1755
; RV64IMZB-NEXT: zext.w a2, a0
; RV64IMZB-NEXT: mul a1, a2, a1
; RV64IMZB-NEXT: srli a1, a1, 32
@@ -301,7 +301,7 @@ define i16 @udiv16_constant_no_add(i16 %a) nounwind {
; RV64IM-NEXT: slli a0, a0, 48
; RV64IM-NEXT: lui a1, 13
; RV64IM-NEXT: srli a0, a0, 48
-; RV64IM-NEXT: addiw a1, a1, -819
+; RV64IM-NEXT: addi a1, a1, -819
; RV64IM-NEXT: mul a0, a0, a1
; RV64IM-NEXT: srli a0, a0, 18
; RV64IM-NEXT: ret
@@ -310,7 +310,7 @@ define i16 @udiv16_constant_no_add(i16 %a) nounwind {
; RV64IMZB: # %bb.0:
; RV64IMZB-NEXT: zext.h a0, a0
; RV64IMZB-NEXT: lui a1, 13
-; RV64IMZB-NEXT: addiw a1, a1, -819
+; RV64IMZB-NEXT: addi a1, a1, -819
; RV64IMZB-NEXT: mul a0, a0, a1
; RV64IMZB-NEXT: srli a0, a0, 18
; RV64IMZB-NEXT: ret
@@ -355,8 +355,8 @@ define i16 @udiv16_constant_add(i16 %a) nounwind {
; RV64IM: # %bb.0:
; RV64IM-NEXT: lui a1, 2
; RV64IM-NEXT: lui a2, 16
-; RV64IM-NEXT: addiw a1, a1, 1171
-; RV64IM-NEXT: addiw a2, a2, -1
+; RV64IM-NEXT: addi a1, a1, 1171
+; RV64IM-NEXT: addi a2, a2, -1
; RV64IM-NEXT: and a3, a0, a2
; RV64IM-NEXT: mul a1, a3, a1
; RV64IM-NEXT: srli a1, a1, 16
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/float-intrinsics.ll b/llvm/test/CodeGen/RISCV/GlobalISel/float-intrinsics.ll
index 05730a710b4d8..88413291c26cd 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/float-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/float-intrinsics.ll
@@ -1002,7 +1002,7 @@ define i1 @fpclass(float %x) {
; RV64I-NEXT: lui a4, 2048
; RV64I-NEXT: lui a5, 520192
; RV64I-NEXT: srli a2, a2, 33
-; RV64I-NEXT: addiw a6, a4, -1
+; RV64I-NEXT: addi a6, a4, -1
; RV64I-NEXT: xor a0, a0, a2
; RV64I-NEXT: subw a3, a2, a3
; RV64I-NEXT: sltu a3, a3, a6
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/constant64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/constant64.mir
index 646152e2e4ed4..0f00bd0ced264 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/constant64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/constant64.mir
@@ -159,8 +159,8 @@ body: |
; CHECK: liveins: $x10
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[LUI:%[0-9]+]]:gpr = LUI 524288
- ; CHECK-NEXT: [[ADDIW:%[0-9]+]]:gpr = ADDIW [[LUI]], 648
- ; CHECK-NEXT: $x10 = COPY [[ADDIW]]
+ ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI [[LUI]], 648
+ ; CHECK-NEXT: $x10 = COPY [[ADDI]]
; CHECK-NEXT: PseudoRET implicit $x10
%0:gprb(s64) = G_CONSTANT i64 -2147483000
$x10 = COPY %0(s64)
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/fp-constant-f16.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/fp-constant-f16.mir
index 8951e373ba7a9..3028b6476e20b 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/fp-constant-f16.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/fp-constant-f16.mir
@@ -1,8 +1,8 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 3
# RUN: llc -mtriple=riscv32 -mattr=+zfh -run-pass=instruction-select \
-# RUN: -simplify-mir -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,RV32
+# RUN: -simplify-mir -verify-machineinstrs %s -o - | FileCheck %s
# RUN: llc -mtriple=riscv64 -mattr=+zfh -run-pass=instruction-select \
-# RUN: -simplify-mir -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,RV64
+# RUN: -simplify-mir -verify-machineinstrs %s -o - | FileCheck %s
---
name: half_imm
@@ -10,19 +10,12 @@ legalized: true
regBankSelected: true
body: |
bb.1:
- ; RV32-LABEL: name: half_imm
- ; RV32: [[LUI:%[0-9]+]]:gpr = LUI 4
- ; RV32-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI [[LUI]], 584
- ; RV32-NEXT: [[FMV_H_X:%[0-9]+]]:fpr16 = FMV_H_X [[ADDI]]
- ; RV32-NEXT: $f10_h = COPY [[FMV_H_X]]
- ; RV32-NEXT: PseudoRET implicit $f10_h
- ;
- ; RV64-LABEL: name: half_imm
- ; RV64: [[LUI:%[0-9]+]]:gpr = LUI 4
- ; RV64-NEXT: [[ADDIW:%[0-9]+]]:gpr = ADDIW [[LUI]], 584
- ; RV64-NEXT: [[FMV_H_X:%[0-9]+]]:fpr16 = FMV_H_X [[ADDIW]]
- ; RV64-NEXT: $f10_h = COPY [[FMV_H_X]]
- ; RV64-NEXT: PseudoRET implicit $f10_h
+ ; CHECK-LABEL: name: half_imm
+ ; CHECK: [[LUI:%[0-9]+]]:gpr = LUI 4
+ ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI [[LUI]], 584
+ ; CHECK-NEXT: [[FMV_H_X:%[0-9]+]]:fpr16 = FMV_H_X [[ADDI]]
+ ; CHECK-NEXT: $f10_h = COPY [[FMV_H_X]]
+ ; CHECK-NEXT: PseudoRET implicit $f10_h
%0:fprb(s16) = G_FCONSTANT half 0xH4248
$f10_h = COPY %0(s16)
PseudoRET implicit $f10_h
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/fp-constant.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/fp-constant.mir
index 43f5ec1f57907..e82d4bcec48b1 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/fp-constant.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/fp-constant.mir
@@ -10,19 +10,12 @@ legalized: true
regBankSelected: true
body: |
bb.1:
- ; RV32-LABEL: name: float_imm
- ; RV32: [[LUI:%[0-9]+]]:gpr = LUI 263313
- ; RV32-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI [[LUI]], -37
- ; RV32-NEXT: [[FMV_W_X:%[0-9]+]]:fpr32 = FMV_W_X [[ADDI]]
- ; RV32-NEXT: $f10_f = COPY [[FMV_W_X]]
- ; RV32-NEXT: PseudoRET implicit $f10_f
- ;
- ; RV64-LABEL: name: float_imm
- ; RV64: [[LUI:%[0-9]+]]:gpr = LUI 263313
- ; RV64-NEXT: [[ADDIW:%[0-9]+]]:gpr = ADDIW [[LUI]], -37
- ; RV64-NEXT: [[FMV_W_X:%[0-9]+]]:fpr32 = FMV_W_X [[ADDIW]]
- ; RV64-NEXT: $f10_f = COPY [[FMV_W_X]]
- ; RV64-NEXT: PseudoRET implicit $f10_f
+ ; CHECK-LABEL: name: float_imm
+ ; CHECK: [[LUI:%[0-9]+]]:gpr = LUI 263313
+ ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI [[LUI]], -37
+ ; CHECK-NEXT: [[FMV_W_X:%[0-9]+]]:fpr32 = FMV_W_X [[ADDI]]
+ ; CHECK-NEXT: $f10_f = COPY [[FMV_W_X]]
+ ; CHECK-NEXT: PseudoRET implicit $f10_f
%0:fprb(s32) = G_FCONSTANT float 0x400921FB60000000
$f10_f = COPY %0(s32)
PseudoRET implicit $f10_f
@@ -109,14 +102,14 @@ body: |
;
; RV64-LABEL: name: double_imm
; RV64: [[LUI:%[0-9]+]]:gpr = LUI 512
- ; RV64-NEXT: [[ADDIW:%[0-9]+]]:gpr = ADDIW [[LUI]], 1169
- ; RV64-NEXT: [[SLLI:%[0-9]+]]:gpr = SLLI [[ADDIW]], 15
- ; RV64-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI [[SLLI]], -299
- ; RV64-NEXT: [[SLLI1:%[0-9]+]]:gpr = SLLI [[ADDI]], 14
- ; RV64-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI [[SLLI1]], 1091
- ; RV64-NEXT: [[SLLI2:%[0-9]+]]:gpr = SLLI [[ADDI1]], 12
- ; RV64-NEXT: [[ADDI2:%[0-9]+]]:gpr = ADDI [[SLLI2]], -744
- ; RV64-NEXT: [[FMV_D_X:%[0-9]+]]:fpr64 = FMV_D_X [[ADDI2]]
+ ; RV64-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI [[LUI]], 1169
+ ; RV64-NEXT: [[SLLI:%[0-9]+]]:gpr = SLLI [[ADDI]], 15
+ ; RV64-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI [[SLLI]], -299
+ ; RV64-NEXT: [[SLLI1:%[0-9]+]]:gpr = SLLI [[ADDI1]], 14
+ ; RV64-NEXT: [[ADDI2:%[0-9]+]]:gpr = ADDI [[SLLI1]], 1091
+ ; RV64-NEXT: [[SLLI2:%[0-9]+]]:gpr = SLLI [[ADDI2]], 12
+ ; RV64-NEXT: [[ADDI3:%[0-9]+]]:gpr = ADDI [[SLLI2]], -744
+ ; RV64-NEXT: [[FMV_D_X:%[0-9]+]]:fpr64 = FMV_D_X [[ADDI3]]
; RV64-NEXT: $f10_d = COPY [[FMV_D_X]]
; RV64-NEXT: PseudoRET implicit $f10_d
%0:fprb(s64) = G_FCONSTANT double 0x400921FB54442D18
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb.ll
index 8549a7c526e45..6fb3572774c52 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb.ll
@@ -40,9 +40,9 @@ define signext i32 @ctlz_i32(i32 signext %a) nounwind {
; RV64I-NEXT: sraiw a1, a0, 4
; RV64I-NEXT: addw a0, a1, a0
; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a2, a2, -241
+; RV64I-NEXT: addi a2, a2, -241
; RV64I-NEXT: and a0, a0, a2
-; RV64I-NEXT: addiw a1, a1, 257
+; RV64I-NEXT: addi a1, a1, 257
; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a0, a0, 24
; RV64I-NEXT: li a1, 32
@@ -97,9 +97,9 @@ define signext i32 @log2_i32(i32 signext %a) nounwind {
; RV64I-NEXT: sraiw a1, a0, 4
; RV64I-NEXT: addw a0, a1, a0
; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a2, a2, -241
+; RV64I-NEXT: addi a2, a2, -241
; RV64I-NEXT: and a0, a0, a2
-; RV64I-NEXT: addiw a1, a1, 257
+; RV64I-NEXT: addi a1, a1, 257
; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a0, a0, 24
; RV64I-NEXT: li a1, 32
@@ -162,9 +162,9 @@ define signext i32 @log2_ceil_i32(i32 signext %a) nounwind {
; RV64I-NEXT: sraiw a1, a0, 4
; RV64I-NEXT: addw a0, a1, a0
; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a2, a2, -241
+; RV64I-NEXT: addi a2, a2, -241
; RV64I-NEXT: and a0, a0, a2
-; RV64I-NEXT: addiw a1, a1, 257
+; RV64I-NEXT: addi a1, a1, 257
; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a0, a0, 24
; RV64I-NEXT: li a1, 32
@@ -221,9 +221,9 @@ define signext i32 @findLastSet_i32(i32 signext %a) nounwind {
; RV64I-NEXT: sraiw a1, a0, 4
; RV64I-NEXT: addw a0, a1, a0
; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a2, a2, -241
+; RV64I-NEXT: addi a2, a2, -241
; RV64I-NEXT: and a0, a0, a2
-; RV64I-NEXT: addiw a1, a1, 257
+; RV64I-NEXT: addi a1, a1, 257
; RV64I-NEXT: call __muldi3
; RV64I-NEXT: beqz s0, .LBB3_2
; RV64I-NEXT: # %bb.1:
@@ -292,9 +292,9 @@ define i32 @ctlz_lshr_i32(i32 signext %a) {
; RV64I-NEXT: sraiw a1, a0, 4
; RV64I-NEXT: addw a0, a1, a0
; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a2, a2, -241
+; RV64I-NEXT: addi a2, a2, -241
; RV64I-NEXT: and a0, a0, a2
-; RV64I-NEXT: addiw a1, a1, 257
+; RV64I-NEXT: addi a1, a1, 257
; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a0, a0, 24
; RV64I-NEXT: li a1, 32
@@ -421,9 +421,9 @@ define signext i32 @cttz_i32(i32 signext %a) nounwind {
; RV64I-NEXT: sraiw a1, a0, 4
; RV64I-NEXT: addw a0, a1, a0
; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a2, a2, -241
+; RV64I-NEXT: addi a2, a2, -241
; RV64I-NEXT: and a0, a0, a2
-; RV64I-NEXT: addiw a1, a1, 257
+; RV64I-NEXT: addi a1, a1, 257
; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a0, a0, 24
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -464,9 +464,9 @@ define signext i32 @cttz_zero_undef_i32(i32 signext %a) nounwind {
; RV64I-NEXT: sraiw a1, a0, 4
; RV64I-NEXT: addw a0, a1, a0
; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a2, a2, -241
+; RV64I-NEXT: addi a2, a2, -241
; RV64I-NEXT: and a0, a0, a2
-; RV64I-NEXT: addiw a1, a1, 257
+; RV64I-NEXT: addi a1, a1, 257
; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a0, a0, 24
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -506,9 +506,9 @@ define signext i32 @findFirstSet_i32(i32 signext %a) nounwind {
; RV64I-NEXT: sraiw a1, a0, 4
; RV64I-NEXT: addw a0, a1, a0
; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a2, a2, -241
+; RV64I-NEXT: addi a2, a2, -241
; RV64I-NEXT: and a0, a0, a2
-; RV64I-NEXT: addiw a1, a1, 257
+; RV64I-NEXT: addi a1, a1, 257
; RV64I-NEXT: call __muldi3
; RV64I-NEXT: beqz s0, .LBB8_2
; RV64I-NEXT: # %bb.1:
@@ -562,9 +562,9 @@ define signext i32 @ffs_i32(i32 signext %a) nounwind {
; RV64I-NEXT: sraiw a1, a0, 4
; RV64I-NEXT: addw a0, a1, a0
; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a2, a2, -241
+; RV64I-NEXT: addi a2, a2, -241
; RV64I-NEXT: and a0, a0, a2
-; RV64I-NEXT: addiw a1, a1, 257
+; RV64I-NEXT: addi a1, a1, 257
; RV64I-NEXT: call __muldi3
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: li a0, 0
@@ -681,9 +681,9 @@ define signext i32 @ctpop_i32(i32 signext %a) nounwind {
; RV64I-NEXT: sraiw a1, a0, 4
; RV64I-NEXT: addw a0, a1, a0
; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a2, a2, -241
+; RV64I-NEXT: addi a2, a2, -241
; RV64I-NEXT: and a0, a0, a2
-; RV64I-NEXT: addiw a1, a1, 257
+; RV64I-NEXT: addi a1, a1, 257
; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a0, a0, 24
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -737,9 +737,9 @@ define signext i32 @ctpop_i32_load(ptr %p) nounwind {
; RV64I-NEXT: sraiw a1, a0, 4
; RV64I-NEXT: addw a0, a1, a0
; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a2, a2, -241
+; RV64I-NEXT: addi a2, a2, -241
; RV64I-NEXT: and a0, a0, a2
-; RV64I-NEXT: addiw a1, a1, 257
+; RV64I-NEXT: addi a1, a1, 257
; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a0, a0, 24
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -1185,7 +1185,7 @@ define i64 @bswap_i64(i64 %a) {
; RV64I-NEXT: srli a4, a0, 40
; RV64I-NEXT: or a1, a2, a1
; RV64I-NEXT: lui a2, 4080
-; RV64I-NEXT: addiw a3, a3, -256
+; RV64I-NEXT: addi a3, a3, -256
; RV64I-NEXT: and a4, a4, a3
; RV64I-NEXT: or a1, a1, a4
; RV64I-NEXT: srli a4, a0, 24
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbkb.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbkb.ll
index f413abffcdccc..cd59c9e01806d 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbkb.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbkb.ll
@@ -141,7 +141,7 @@ define signext i32 @packh_i32(i32 signext %a, i32 signext %b) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: lui a2, 16
; RV64I-NEXT: zext.b a0, a0
-; RV64I-NEXT: addiw a2, a2, -256
+; RV64I-NEXT: addi a2, a2, -256
; RV64I-NEXT: slli a1, a1, 8
; RV64I-NEXT: and a1, a1, a2
; RV64I-NEXT: or a0, a1, a0
@@ -151,7 +151,7 @@ define signext i32 @packh_i32(i32 signext %a, i32 signext %b) nounwind {
; RV64ZBKB: # %bb.0:
; RV64ZBKB-NEXT: lui a2, 16
; RV64ZBKB-NEXT: zext.b a0, a0
-; RV64ZBKB-NEXT: addiw a2, a2, -256
+; RV64ZBKB-NEXT: addi a2, a2, -256
; RV64ZBKB-NEXT: slli a1, a1, 8
; RV64ZBKB-NEXT: and a1, a1, a2
; RV64ZBKB-NEXT: or a0, a1, a0
@@ -189,7 +189,7 @@ define i64 @packh_i64(i64 %a, i64 %b) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: lui a2, 16
; RV64I-NEXT: zext.b a0, a0
-; RV64I-NEXT: addiw a2, a2, -256
+; RV64I-NEXT: addi a2, a2, -256
; RV64I-NEXT: slli a1, a1, 8
; RV64I-NEXT: and a1, a1, a2
; RV64I-NEXT: or a0, a1, a0
@@ -199,7 +199,7 @@ define i64 @packh_i64(i64 %a, i64 %b) nounwind {
; RV64ZBKB: # %bb.0:
; RV64ZBKB-NEXT: lui a2, 16
; RV64ZBKB-NEXT: zext.b a0, a0
-; RV64ZBKB-NEXT: addiw a2, a2, -256
+; RV64ZBKB-NEXT: addi a2, a2, -256
; RV64ZBKB-NEXT: slli a1, a1, 8
; RV64ZBKB-NEXT: and a1, a1, a2
; RV64ZBKB-NEXT: or a0, a1, a0
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/vararg.ll b/llvm/test/CodeGen/RISCV/GlobalISel/vararg.ll
index fc9be94988451..afef96db5e290 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/vararg.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/vararg.ll
@@ -1171,7 +1171,7 @@ define void @va3_caller() nounwind {
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: lui a1, 5
; RV64-NEXT: li a0, 2
-; RV64-NEXT: addiw a2, a1, -480
+; RV64-NEXT: addi a2, a1, -480
; RV64-NEXT: li a1, 1111
; RV64-NEXT: call va3
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -1203,7 +1203,7 @@ define void @va3_caller() nounwind {
; RV64-WITHFP-NEXT: addi s0, sp, 16
; RV64-WITHFP-NEXT: lui a1, 5
; RV64-WITHFP-NEXT: li a0, 2
-; RV64-WITHFP-NEXT: addiw a2, a1, -480
+; RV64-WITHFP-NEXT: addi a2, a1, -480
; RV64-WITHFP-NEXT: li a1, 1111
; RV64-WITHFP-NEXT: call va3
; RV64-WITHFP-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -1618,7 +1618,7 @@ define i32 @va_large_stack(ptr %fmt, ...) {
; RV64-LABEL: va_large_stack:
; RV64: # %bb.0:
; RV64-NEXT: lui a0, 24414
-; RV64-NEXT: addiw a0, a0, 336
+; RV64-NEXT: addi a0, a0, 336
; RV64-NEXT: sub sp, sp, a0
; RV64-NEXT: .cfi_def_cfa_offset 100000080
; RV64-NEXT: lui a0, 24414
@@ -1635,7 +1635,7 @@ define i32 @va_large_stack(ptr %fmt, ...) {
; RV64-NEXT: sd a4, 304(a0)
; RV64-NEXT: addi a0, sp, 8
; RV64-NEXT: lui a1, 24414
-; RV64-NEXT: addiw a1, a1, 280
+; RV64-NEXT: addi a1, a1, 280
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: sd a1, 8(sp)
; RV64-NEXT: lw a0, 4(a0)
@@ -1657,7 +1657,7 @@ define i32 @va_large_stack(ptr %fmt, ...) {
; RV64-NEXT: sw a2, 12(sp)
; RV64-NEXT: lw a0, 0(a0...
[truncated]
``````````
https://github.com/llvm/llvm-project/pull/141663