[llvm] 8e43c22 - [RISCV] Use LBU for extloadi8.
Craig Topper via llvm-commits
llvm-commits@lists.llvm.org
Tue Mar 21 18:55:29 PDT 2023
Author: Craig Topper
Date: 2023-03-21T18:52:05-07:00
New Revision: 8e43c22d303835bf90a64f6732df4dfc028ac688
URL: https://github.com/llvm/llvm-project/commit/8e43c22d303835bf90a64f6732df4dfc028ac688
DIFF: https://github.com/llvm/llvm-project/commit/8e43c22d303835bf90a64f6732df4dfc028ac688.diff
LOG: [RISCV] Use LBU for extloadi8.
The Zcb extension has c.lbu, but not c.lb. This patch makes us prefer
LBU over LB when we have a choice, which will enable more compression
opportunities.
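
As a rough illustration (a minimal hand-written reduction of the kind of
pattern the updated tests cover, e.g. rmw_addi_addi below; the function
name here is made up), a byte load whose extension kind is irrelevant,
because only the low 8 bits feed a store, is selected as extloadi8:

; Hypothetical reduction: only the low 8 bits of %v are observed, so the
; load is an anyext load (extloadi8) rather than a sign- or zero-extending one.
define void @bump_byte(ptr %p) {
  %v = load i8, ptr %p
  %inc = add i8 %v, 10
  store i8 %inc, ptr %p
  ret void
}

Previously this selected lb for the load; with this patch it selects lbu,
which can be compressed to c.lbu when Zcb is enabled and the operands fit
the compressed form's constraints (there is no c.lb).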
Reviewed By: asb
Differential Revision: https://reviews.llvm.org/D146270
Added:
Modified:
llvm/lib/Target/RISCV/RISCVInstrInfo.td
llvm/test/CodeGen/RISCV/atomic-rmw.ll
llvm/test/CodeGen/RISCV/atomic-signext.ll
llvm/test/CodeGen/RISCV/atomicrmw-uinc-udec-wrap.ll
llvm/test/CodeGen/RISCV/forced-atomics.ll
llvm/test/CodeGen/RISCV/hoist-global-addr-base.ll
llvm/test/CodeGen/RISCV/local-stack-slot-allocation.ll
llvm/test/CodeGen/RISCV/mem.ll
llvm/test/CodeGen/RISCV/mem64.ll
llvm/test/CodeGen/RISCV/memcpy-inline.ll
llvm/test/CodeGen/RISCV/rv64i-shift-sext.ll
llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll
llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store-asm.ll
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
llvm/test/CodeGen/RISCV/unaligned-load-store.ll
llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
index 6ce39999b1a08..ab8a8a4cc9935 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -1625,7 +1625,7 @@ multiclass LdPat<PatFrag LoadOp, RVInst Inst, ValueType vt = XLenVT> {
}
defm : LdPat<sextloadi8, LB>;
-defm : LdPat<extloadi8, LB>;
+defm : LdPat<extloadi8, LBU>; // Prefer unsigned due to no c.lb in Zcb.
defm : LdPat<sextloadi16, LH>;
defm : LdPat<extloadi16, LH>;
defm : LdPat<load, LW, i32>, Requires<[IsRV32]>;
diff --git a/llvm/test/CodeGen/RISCV/atomic-rmw.ll b/llvm/test/CodeGen/RISCV/atomic-rmw.ll
index 424d28a87bb26..0027d0a24f072 100644
--- a/llvm/test/CodeGen/RISCV/atomic-rmw.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-rmw.ll
@@ -2024,7 +2024,7 @@ define i8 @atomicrmw_max_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: li a4, 0
; RV32I-NEXT: call __atomic_compare_exchange_1@plt
-; RV32I-NEXT: lb a3, 15(sp)
+; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB35_4
; RV32I-NEXT: .LBB35_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
@@ -2095,7 +2095,7 @@ define i8 @atomicrmw_max_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: li a3, 0
; RV64I-NEXT: li a4, 0
; RV64I-NEXT: call __atomic_compare_exchange_1@plt
-; RV64I-NEXT: lb a3, 15(sp)
+; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB35_4
; RV64I-NEXT: .LBB35_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
@@ -2170,7 +2170,7 @@ define i8 @atomicrmw_max_i8_acquire(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: li a4, 2
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: call __atomic_compare_exchange_1@plt
-; RV32I-NEXT: lb a3, 15(sp)
+; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB36_4
; RV32I-NEXT: .LBB36_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
@@ -2241,7 +2241,7 @@ define i8 @atomicrmw_max_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: li a4, 2
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: call __atomic_compare_exchange_1@plt
-; RV64I-NEXT: lb a3, 15(sp)
+; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB36_4
; RV64I-NEXT: .LBB36_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
@@ -2316,7 +2316,7 @@ define i8 @atomicrmw_max_i8_release(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a4, 0
; RV32I-NEXT: call __atomic_compare_exchange_1@plt
-; RV32I-NEXT: lb a3, 15(sp)
+; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB37_4
; RV32I-NEXT: .LBB37_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
@@ -2387,7 +2387,7 @@ define i8 @atomicrmw_max_i8_release(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a4, 0
; RV64I-NEXT: call __atomic_compare_exchange_1@plt
-; RV64I-NEXT: lb a3, 15(sp)
+; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB37_4
; RV64I-NEXT: .LBB37_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
@@ -2462,7 +2462,7 @@ define i8 @atomicrmw_max_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: li a4, 2
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: call __atomic_compare_exchange_1@plt
-; RV32I-NEXT: lb a3, 15(sp)
+; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB38_4
; RV32I-NEXT: .LBB38_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
@@ -2533,7 +2533,7 @@ define i8 @atomicrmw_max_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: li a4, 2
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: call __atomic_compare_exchange_1@plt
-; RV64I-NEXT: lb a3, 15(sp)
+; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB38_4
; RV64I-NEXT: .LBB38_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
@@ -2608,7 +2608,7 @@ define i8 @atomicrmw_max_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: li a4, 5
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: call __atomic_compare_exchange_1@plt
-; RV32I-NEXT: lb a3, 15(sp)
+; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB39_4
; RV32I-NEXT: .LBB39_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
@@ -2679,7 +2679,7 @@ define i8 @atomicrmw_max_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: li a4, 5
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: call __atomic_compare_exchange_1@plt
-; RV64I-NEXT: lb a3, 15(sp)
+; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB39_4
; RV64I-NEXT: .LBB39_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
@@ -2754,7 +2754,7 @@ define i8 @atomicrmw_min_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: li a4, 0
; RV32I-NEXT: call __atomic_compare_exchange_1@plt
-; RV32I-NEXT: lb a3, 15(sp)
+; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB40_4
; RV32I-NEXT: .LBB40_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
@@ -2825,7 +2825,7 @@ define i8 @atomicrmw_min_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: li a3, 0
; RV64I-NEXT: li a4, 0
; RV64I-NEXT: call __atomic_compare_exchange_1@plt
-; RV64I-NEXT: lb a3, 15(sp)
+; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB40_4
; RV64I-NEXT: .LBB40_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
@@ -2900,7 +2900,7 @@ define i8 @atomicrmw_min_i8_acquire(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: li a4, 2
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: call __atomic_compare_exchange_1@plt
-; RV32I-NEXT: lb a3, 15(sp)
+; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB41_4
; RV32I-NEXT: .LBB41_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
@@ -2971,7 +2971,7 @@ define i8 @atomicrmw_min_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: li a4, 2
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: call __atomic_compare_exchange_1@plt
-; RV64I-NEXT: lb a3, 15(sp)
+; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB41_4
; RV64I-NEXT: .LBB41_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
@@ -3046,7 +3046,7 @@ define i8 @atomicrmw_min_i8_release(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a4, 0
; RV32I-NEXT: call __atomic_compare_exchange_1@plt
-; RV32I-NEXT: lb a3, 15(sp)
+; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB42_4
; RV32I-NEXT: .LBB42_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
@@ -3117,7 +3117,7 @@ define i8 @atomicrmw_min_i8_release(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a4, 0
; RV64I-NEXT: call __atomic_compare_exchange_1@plt
-; RV64I-NEXT: lb a3, 15(sp)
+; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB42_4
; RV64I-NEXT: .LBB42_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
@@ -3192,7 +3192,7 @@ define i8 @atomicrmw_min_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: li a4, 2
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: call __atomic_compare_exchange_1@plt
-; RV32I-NEXT: lb a3, 15(sp)
+; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB43_4
; RV32I-NEXT: .LBB43_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
@@ -3263,7 +3263,7 @@ define i8 @atomicrmw_min_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: li a4, 2
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: call __atomic_compare_exchange_1@plt
-; RV64I-NEXT: lb a3, 15(sp)
+; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB43_4
; RV64I-NEXT: .LBB43_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
@@ -3338,7 +3338,7 @@ define i8 @atomicrmw_min_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: li a4, 5
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: call __atomic_compare_exchange_1@plt
-; RV32I-NEXT: lb a3, 15(sp)
+; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB44_4
; RV32I-NEXT: .LBB44_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
@@ -3409,7 +3409,7 @@ define i8 @atomicrmw_min_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: li a4, 5
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: call __atomic_compare_exchange_1@plt
-; RV64I-NEXT: lb a3, 15(sp)
+; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB44_4
; RV64I-NEXT: .LBB44_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
@@ -3483,7 +3483,7 @@ define i8 @atomicrmw_umax_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: li a4, 0
; RV32I-NEXT: call __atomic_compare_exchange_1@plt
-; RV32I-NEXT: lb a3, 15(sp)
+; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB45_4
; RV32I-NEXT: .LBB45_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
@@ -3547,7 +3547,7 @@ define i8 @atomicrmw_umax_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: li a3, 0
; RV64I-NEXT: li a4, 0
; RV64I-NEXT: call __atomic_compare_exchange_1@plt
-; RV64I-NEXT: lb a3, 15(sp)
+; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB45_4
; RV64I-NEXT: .LBB45_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
@@ -3615,7 +3615,7 @@ define i8 @atomicrmw_umax_i8_acquire(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: li a4, 2
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: call __atomic_compare_exchange_1@plt
-; RV32I-NEXT: lb a3, 15(sp)
+; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB46_4
; RV32I-NEXT: .LBB46_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
@@ -3679,7 +3679,7 @@ define i8 @atomicrmw_umax_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: li a4, 2
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: call __atomic_compare_exchange_1@plt
-; RV64I-NEXT: lb a3, 15(sp)
+; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB46_4
; RV64I-NEXT: .LBB46_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
@@ -3747,7 +3747,7 @@ define i8 @atomicrmw_umax_i8_release(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a4, 0
; RV32I-NEXT: call __atomic_compare_exchange_1@plt
-; RV32I-NEXT: lb a3, 15(sp)
+; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB47_4
; RV32I-NEXT: .LBB47_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
@@ -3811,7 +3811,7 @@ define i8 @atomicrmw_umax_i8_release(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a4, 0
; RV64I-NEXT: call __atomic_compare_exchange_1@plt
-; RV64I-NEXT: lb a3, 15(sp)
+; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB47_4
; RV64I-NEXT: .LBB47_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
@@ -3879,7 +3879,7 @@ define i8 @atomicrmw_umax_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: li a4, 2
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: call __atomic_compare_exchange_1@plt
-; RV32I-NEXT: lb a3, 15(sp)
+; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB48_4
; RV32I-NEXT: .LBB48_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
@@ -3943,7 +3943,7 @@ define i8 @atomicrmw_umax_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: li a4, 2
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: call __atomic_compare_exchange_1@plt
-; RV64I-NEXT: lb a3, 15(sp)
+; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB48_4
; RV64I-NEXT: .LBB48_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
@@ -4011,7 +4011,7 @@ define i8 @atomicrmw_umax_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: li a4, 5
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: call __atomic_compare_exchange_1@plt
-; RV32I-NEXT: lb a3, 15(sp)
+; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB49_4
; RV32I-NEXT: .LBB49_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
@@ -4075,7 +4075,7 @@ define i8 @atomicrmw_umax_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: li a4, 5
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: call __atomic_compare_exchange_1@plt
-; RV64I-NEXT: lb a3, 15(sp)
+; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB49_4
; RV64I-NEXT: .LBB49_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
@@ -4143,7 +4143,7 @@ define i8 @atomicrmw_umin_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: li a4, 0
; RV32I-NEXT: call __atomic_compare_exchange_1@plt
-; RV32I-NEXT: lb a3, 15(sp)
+; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB50_4
; RV32I-NEXT: .LBB50_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
@@ -4207,7 +4207,7 @@ define i8 @atomicrmw_umin_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: li a3, 0
; RV64I-NEXT: li a4, 0
; RV64I-NEXT: call __atomic_compare_exchange_1@plt
-; RV64I-NEXT: lb a3, 15(sp)
+; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB50_4
; RV64I-NEXT: .LBB50_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
@@ -4275,7 +4275,7 @@ define i8 @atomicrmw_umin_i8_acquire(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: li a4, 2
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: call __atomic_compare_exchange_1@plt
-; RV32I-NEXT: lb a3, 15(sp)
+; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB51_4
; RV32I-NEXT: .LBB51_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
@@ -4339,7 +4339,7 @@ define i8 @atomicrmw_umin_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: li a4, 2
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: call __atomic_compare_exchange_1@plt
-; RV64I-NEXT: lb a3, 15(sp)
+; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB51_4
; RV64I-NEXT: .LBB51_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
@@ -4407,7 +4407,7 @@ define i8 @atomicrmw_umin_i8_release(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a4, 0
; RV32I-NEXT: call __atomic_compare_exchange_1@plt
-; RV32I-NEXT: lb a3, 15(sp)
+; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB52_4
; RV32I-NEXT: .LBB52_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
@@ -4471,7 +4471,7 @@ define i8 @atomicrmw_umin_i8_release(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a4, 0
; RV64I-NEXT: call __atomic_compare_exchange_1@plt
-; RV64I-NEXT: lb a3, 15(sp)
+; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB52_4
; RV64I-NEXT: .LBB52_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
@@ -4539,7 +4539,7 @@ define i8 @atomicrmw_umin_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: li a4, 2
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: call __atomic_compare_exchange_1@plt
-; RV32I-NEXT: lb a3, 15(sp)
+; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB53_4
; RV32I-NEXT: .LBB53_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
@@ -4603,7 +4603,7 @@ define i8 @atomicrmw_umin_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: li a4, 2
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: call __atomic_compare_exchange_1@plt
-; RV64I-NEXT: lb a3, 15(sp)
+; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB53_4
; RV64I-NEXT: .LBB53_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
@@ -4671,7 +4671,7 @@ define i8 @atomicrmw_umin_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: li a4, 5
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: call __atomic_compare_exchange_1@plt
-; RV32I-NEXT: lb a3, 15(sp)
+; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB54_4
; RV32I-NEXT: .LBB54_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
@@ -4735,7 +4735,7 @@ define i8 @atomicrmw_umin_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: li a4, 5
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: call __atomic_compare_exchange_1@plt
-; RV64I-NEXT: lb a3, 15(sp)
+; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB54_4
; RV64I-NEXT: .LBB54_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
diff --git a/llvm/test/CodeGen/RISCV/atomic-signext.ll b/llvm/test/CodeGen/RISCV/atomic-signext.ll
index 9b3351ebe1ffd..48fce21490592 100644
--- a/llvm/test/CodeGen/RISCV/atomic-signext.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-signext.ll
@@ -596,7 +596,7 @@ define signext i8 @atomicrmw_max_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: li a4, 0
; RV32I-NEXT: call __atomic_compare_exchange_1@plt
-; RV32I-NEXT: lb a3, 15(sp)
+; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB10_4
; RV32I-NEXT: .LBB10_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
@@ -670,7 +670,7 @@ define signext i8 @atomicrmw_max_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: li a3, 0
; RV64I-NEXT: li a4, 0
; RV64I-NEXT: call __atomic_compare_exchange_1@plt
-; RV64I-NEXT: lb a3, 15(sp)
+; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB10_4
; RV64I-NEXT: .LBB10_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
@@ -748,7 +748,7 @@ define signext i8 @atomicrmw_min_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: li a4, 0
; RV32I-NEXT: call __atomic_compare_exchange_1@plt
-; RV32I-NEXT: lb a3, 15(sp)
+; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB11_4
; RV32I-NEXT: .LBB11_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
@@ -822,7 +822,7 @@ define signext i8 @atomicrmw_min_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: li a3, 0
; RV64I-NEXT: li a4, 0
; RV64I-NEXT: call __atomic_compare_exchange_1@plt
-; RV64I-NEXT: lb a3, 15(sp)
+; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB11_4
; RV64I-NEXT: .LBB11_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
@@ -899,7 +899,7 @@ define signext i8 @atomicrmw_umax_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: li a4, 0
; RV32I-NEXT: call __atomic_compare_exchange_1@plt
-; RV32I-NEXT: lb a3, 15(sp)
+; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB12_4
; RV32I-NEXT: .LBB12_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
@@ -966,7 +966,7 @@ define signext i8 @atomicrmw_umax_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: li a3, 0
; RV64I-NEXT: li a4, 0
; RV64I-NEXT: call __atomic_compare_exchange_1@plt
-; RV64I-NEXT: lb a3, 15(sp)
+; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB12_4
; RV64I-NEXT: .LBB12_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
@@ -1037,7 +1037,7 @@ define signext i8 @atomicrmw_umin_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: li a4, 0
; RV32I-NEXT: call __atomic_compare_exchange_1@plt
-; RV32I-NEXT: lb a3, 15(sp)
+; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB13_4
; RV32I-NEXT: .LBB13_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
@@ -1104,7 +1104,7 @@ define signext i8 @atomicrmw_umin_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: li a3, 0
; RV64I-NEXT: li a4, 0
; RV64I-NEXT: call __atomic_compare_exchange_1@plt
-; RV64I-NEXT: lb a3, 15(sp)
+; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB13_4
; RV64I-NEXT: .LBB13_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
diff --git a/llvm/test/CodeGen/RISCV/atomicrmw-uinc-udec-wrap.ll b/llvm/test/CodeGen/RISCV/atomicrmw-uinc-udec-wrap.ll
index 7c871089bca03..f1528e94c473c 100644
--- a/llvm/test/CodeGen/RISCV/atomicrmw-uinc-udec-wrap.ll
+++ b/llvm/test/CodeGen/RISCV/atomicrmw-uinc-udec-wrap.ll
@@ -41,7 +41,7 @@ define i8 @atomicrmw_uinc_wrap_i8(ptr %ptr, i8 %val) {
; RV32I-NEXT: li a4, 5
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: call __atomic_compare_exchange_1@plt
-; RV32I-NEXT: lb a3, 3(sp)
+; RV32I-NEXT: lbu a3, 3(sp)
; RV32I-NEXT: beqz a0, .LBB0_1
; RV32I-NEXT: # %bb.2: # %atomicrmw.end
; RV32I-NEXT: mv a0, a3
@@ -117,7 +117,7 @@ define i8 @atomicrmw_uinc_wrap_i8(ptr %ptr, i8 %val) {
; RV64I-NEXT: li a4, 5
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: call __atomic_compare_exchange_1@plt
-; RV64I-NEXT: lb a3, 7(sp)
+; RV64I-NEXT: lbu a3, 7(sp)
; RV64I-NEXT: beqz a0, .LBB0_1
; RV64I-NEXT: # %bb.2: # %atomicrmw.end
; RV64I-NEXT: mv a0, a3
@@ -670,7 +670,7 @@ define i8 @atomicrmw_udec_wrap_i8(ptr %ptr, i8 %val) {
; RV32I-NEXT: li a4, 5
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: call __atomic_compare_exchange_1@plt
-; RV32I-NEXT: lb a3, 15(sp)
+; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB4_4
; RV32I-NEXT: .LBB4_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
@@ -766,7 +766,7 @@ define i8 @atomicrmw_udec_wrap_i8(ptr %ptr, i8 %val) {
; RV64I-NEXT: li a4, 5
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: call __atomic_compare_exchange_1@plt
-; RV64I-NEXT: lb a3, 15(sp)
+; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB4_4
; RV64I-NEXT: .LBB4_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
diff --git a/llvm/test/CodeGen/RISCV/forced-atomics.ll b/llvm/test/CodeGen/RISCV/forced-atomics.ll
index 7c92a74dc9517..d15bc2b53ce81 100644
--- a/llvm/test/CodeGen/RISCV/forced-atomics.ll
+++ b/llvm/test/CodeGen/RISCV/forced-atomics.ll
@@ -137,7 +137,7 @@ define i8 @cmpxchg8(ptr %p) nounwind {
; RV32-NO-ATOMIC-NEXT: li a3, 5
; RV32-NO-ATOMIC-NEXT: li a4, 5
; RV32-NO-ATOMIC-NEXT: call __atomic_compare_exchange_1@plt
-; RV32-NO-ATOMIC-NEXT: lb a0, 11(sp)
+; RV32-NO-ATOMIC-NEXT: lbu a0, 11(sp)
; RV32-NO-ATOMIC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV32-NO-ATOMIC-NEXT: ret
@@ -163,7 +163,7 @@ define i8 @cmpxchg8(ptr %p) nounwind {
; RV64-NO-ATOMIC-NEXT: li a3, 5
; RV64-NO-ATOMIC-NEXT: li a4, 5
; RV64-NO-ATOMIC-NEXT: call __atomic_compare_exchange_1@plt
-; RV64-NO-ATOMIC-NEXT: lb a0, 7(sp)
+; RV64-NO-ATOMIC-NEXT: lbu a0, 7(sp)
; RV64-NO-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV64-NO-ATOMIC-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/hoist-global-addr-base.ll b/llvm/test/CodeGen/RISCV/hoist-global-addr-base.ll
index 5375189197d96..90dad26a1833f 100644
--- a/llvm/test/CodeGen/RISCV/hoist-global-addr-base.ll
+++ b/llvm/test/CodeGen/RISCV/hoist-global-addr-base.ll
@@ -382,7 +382,7 @@ define dso_local void @rmw_addi_addi() nounwind {
; RV32-LABEL: rmw_addi_addi:
; RV32: # %bb.0: # %entry
; RV32-NEXT: lui a0, %hi(bar+3211)
-; RV32-NEXT: lb a1, %lo(bar+3211)(a0)
+; RV32-NEXT: lbu a1, %lo(bar+3211)(a0)
; RV32-NEXT: addi a1, a1, 10
; RV32-NEXT: sb a1, %lo(bar+3211)(a0)
; RV32-NEXT: ret
@@ -390,7 +390,7 @@ define dso_local void @rmw_addi_addi() nounwind {
; RV64-LABEL: rmw_addi_addi:
; RV64: # %bb.0: # %entry
; RV64-NEXT: lui a0, %hi(bar+3211)
-; RV64-NEXT: lb a1, %lo(bar+3211)(a0)
+; RV64-NEXT: lbu a1, %lo(bar+3211)(a0)
; RV64-NEXT: addiw a1, a1, 10
; RV64-NEXT: sb a1, %lo(bar+3211)(a0)
; RV64-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/local-stack-slot-allocation.ll b/llvm/test/CodeGen/RISCV/local-stack-slot-allocation.ll
index 186b8287d87c9..ef64eeb9b1869 100644
--- a/llvm/test/CodeGen/RISCV/local-stack-slot-allocation.ll
+++ b/llvm/test/CodeGen/RISCV/local-stack-slot-allocation.ll
@@ -15,8 +15,8 @@ define void @use_frame_base_reg() {
; RV32I-NEXT: lui a0, 24
; RV32I-NEXT: addi a0, a0, 1704
; RV32I-NEXT: add a0, sp, a0
-; RV32I-NEXT: lb a1, 4(a0)
-; RV32I-NEXT: lb a0, 0(a0)
+; RV32I-NEXT: lbu a1, 4(a0)
+; RV32I-NEXT: lbu a0, 0(a0)
; RV32I-NEXT: lui a0, 24
; RV32I-NEXT: addi a0, a0, 1712
; RV32I-NEXT: add sp, sp, a0
@@ -31,8 +31,8 @@ define void @use_frame_base_reg() {
; RV64I-NEXT: lui a0, 24
; RV64I-NEXT: addiw a0, a0, 1704
; RV64I-NEXT: add a0, sp, a0
-; RV64I-NEXT: lb a1, 4(a0)
-; RV64I-NEXT: lb a0, 0(a0)
+; RV64I-NEXT: lbu a1, 4(a0)
+; RV64I-NEXT: lbu a0, 0(a0)
; RV64I-NEXT: lui a0, 24
; RV64I-NEXT: addiw a0, a0, 1712
; RV64I-NEXT: add sp, sp, a0
@@ -57,10 +57,10 @@ define void @load_with_offset() {
; RV32I-NEXT: .cfi_def_cfa_offset 100608
; RV32I-NEXT: lui a0, 25
; RV32I-NEXT: add a0, sp, a0
-; RV32I-NEXT: lb a0, -292(a0)
+; RV32I-NEXT: lbu a0, -292(a0)
; RV32I-NEXT: lui a0, 24
; RV32I-NEXT: add a0, sp, a0
-; RV32I-NEXT: lb a0, 1704(a0)
+; RV32I-NEXT: lbu a0, 1704(a0)
; RV32I-NEXT: lui a0, 25
; RV32I-NEXT: addi a0, a0, -1792
; RV32I-NEXT: add sp, sp, a0
@@ -74,10 +74,10 @@ define void @load_with_offset() {
; RV64I-NEXT: .cfi_def_cfa_offset 100608
; RV64I-NEXT: lui a0, 25
; RV64I-NEXT: add a0, sp, a0
-; RV64I-NEXT: lb a0, -292(a0)
+; RV64I-NEXT: lbu a0, -292(a0)
; RV64I-NEXT: lui a0, 24
; RV64I-NEXT: add a0, sp, a0
-; RV64I-NEXT: lb a0, 1704(a0)
+; RV64I-NEXT: lbu a0, 1704(a0)
; RV64I-NEXT: lui a0, 25
; RV64I-NEXT: addiw a0, a0, -1792
; RV64I-NEXT: add sp, sp, a0
diff --git a/llvm/test/CodeGen/RISCV/mem.ll b/llvm/test/CodeGen/RISCV/mem.ll
index 700680eec46c5..74874c1ca74b3 100644
--- a/llvm/test/CodeGen/RISCV/mem.ll
+++ b/llvm/test/CodeGen/RISCV/mem.ll
@@ -8,7 +8,7 @@ define dso_local i32 @lb(ptr %a) nounwind {
; RV32I-LABEL: lb:
; RV32I: # %bb.0:
; RV32I-NEXT: lb a1, 1(a0)
-; RV32I-NEXT: lb a0, 0(a0)
+; RV32I-NEXT: lbu a0, 0(a0)
; RV32I-NEXT: mv a0, a1
; RV32I-NEXT: ret
%1 = getelementptr i8, ptr %a, i32 1
@@ -123,7 +123,7 @@ define dso_local i32 @load_sext_zext_anyext_i1(ptr %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: lbu a1, 1(a0)
; RV32I-NEXT: lbu a2, 2(a0)
-; RV32I-NEXT: lb a0, 0(a0)
+; RV32I-NEXT: lbu a0, 0(a0)
; RV32I-NEXT: sub a0, a2, a1
; RV32I-NEXT: ret
; sextload i1
@@ -145,7 +145,7 @@ define dso_local i16 @load_sext_zext_anyext_i1_i16(ptr %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: lbu a1, 1(a0)
; RV32I-NEXT: lbu a2, 2(a0)
-; RV32I-NEXT: lb a0, 0(a0)
+; RV32I-NEXT: lbu a0, 0(a0)
; RV32I-NEXT: sub a0, a2, a1
; RV32I-NEXT: ret
; sextload i1
diff --git a/llvm/test/CodeGen/RISCV/mem64.ll b/llvm/test/CodeGen/RISCV/mem64.ll
index ab775481accc3..903c5b223b69c 100644
--- a/llvm/test/CodeGen/RISCV/mem64.ll
+++ b/llvm/test/CodeGen/RISCV/mem64.ll
@@ -8,7 +8,7 @@ define dso_local i64 @lb(ptr %a) nounwind {
; RV64I-LABEL: lb:
; RV64I: # %bb.0:
; RV64I-NEXT: lb a1, 1(a0)
-; RV64I-NEXT: lb a0, 0(a0)
+; RV64I-NEXT: lbu a0, 0(a0)
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: ret
%1 = getelementptr i8, ptr %a, i32 1
@@ -168,7 +168,7 @@ define dso_local i64 @load_sext_zext_anyext_i1(ptr %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: lbu a1, 1(a0)
; RV64I-NEXT: lbu a2, 2(a0)
-; RV64I-NEXT: lb a0, 0(a0)
+; RV64I-NEXT: lbu a0, 0(a0)
; RV64I-NEXT: sub a0, a2, a1
; RV64I-NEXT: ret
; sextload i1
@@ -190,7 +190,7 @@ define dso_local i16 @load_sext_zext_anyext_i1_i16(ptr %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: lbu a1, 1(a0)
; RV64I-NEXT: lbu a2, 2(a0)
-; RV64I-NEXT: lb a0, 0(a0)
+; RV64I-NEXT: lbu a0, 0(a0)
; RV64I-NEXT: sub a0, a2, a1
; RV64I-NEXT: ret
; sextload i1
diff --git a/llvm/test/CodeGen/RISCV/memcpy-inline.ll b/llvm/test/CodeGen/RISCV/memcpy-inline.ll
index 05cb2a83e1e70..4b1b01ce0151c 100644
--- a/llvm/test/CodeGen/RISCV/memcpy-inline.ll
+++ b/llvm/test/CodeGen/RISCV/memcpy-inline.ll
@@ -28,7 +28,7 @@ define i32 @t0() {
; RV32-NEXT: lui a2, %hi(dst)
; RV32-NEXT: sw a1, %lo(dst)(a2)
; RV32-NEXT: addi a0, a0, %lo(src)
-; RV32-NEXT: lb a1, 10(a0)
+; RV32-NEXT: lbu a1, 10(a0)
; RV32-NEXT: lh a3, 8(a0)
; RV32-NEXT: lw a0, 4(a0)
; RV32-NEXT: addi a2, a2, %lo(dst)
@@ -44,7 +44,7 @@ define i32 @t0() {
; RV64-NEXT: ld a1, %lo(src)(a0)
; RV64-NEXT: lui a2, %hi(dst)
; RV64-NEXT: addi a0, a0, %lo(src)
-; RV64-NEXT: lb a3, 10(a0)
+; RV64-NEXT: lbu a3, 10(a0)
; RV64-NEXT: lh a0, 8(a0)
; RV64-NEXT: sd a1, %lo(dst)(a2)
; RV64-NEXT: addi a1, a2, %lo(dst)
diff --git a/llvm/test/CodeGen/RISCV/rv64i-shift-sext.ll b/llvm/test/CodeGen/RISCV/rv64i-shift-sext.ll
index 961cb23714fb2..4a9d8b08a4b2f 100644
--- a/llvm/test/CodeGen/RISCV/rv64i-shift-sext.ll
+++ b/llvm/test/CodeGen/RISCV/rv64i-shift-sext.ll
@@ -177,11 +177,11 @@ define i8 @test13(ptr %0, i64 %1) {
; RV64I-NEXT: li a2, 1
; RV64I-NEXT: subw a2, a2, a1
; RV64I-NEXT: add a2, a0, a2
-; RV64I-NEXT: lb a2, 0(a2)
+; RV64I-NEXT: lbu a2, 0(a2)
; RV64I-NEXT: li a3, 2
; RV64I-NEXT: subw a3, a3, a1
; RV64I-NEXT: add a0, a0, a3
-; RV64I-NEXT: lb a0, 0(a0)
+; RV64I-NEXT: lbu a0, 0(a0)
; RV64I-NEXT: add a0, a2, a0
; RV64I-NEXT: ret
%3 = mul i64 %1, -4294967296
diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll
index 81fdef107d4db..63fcc6ad9e426 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll
@@ -162,7 +162,7 @@ define i1 @extractelt_nxv128i1(<vscale x 128 x i8>* %x, i64 %idx) nounwind {
; RV32-NEXT: vmv1r.v v0, v8
; RV32-NEXT: vmerge.vim v8, v16, 1, v0
; RV32-NEXT: vs8r.v v8, (a2)
-; RV32-NEXT: lb a0, 0(a1)
+; RV32-NEXT: lbu a0, 0(a1)
; RV32-NEXT: addi sp, s0, -80
; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
@@ -202,7 +202,7 @@ define i1 @extractelt_nxv128i1(<vscale x 128 x i8>* %x, i64 %idx) nounwind {
; RV64-NEXT: vmv1r.v v0, v8
; RV64-NEXT: vmerge.vim v8, v16, 1, v0
; RV64-NEXT: vs8r.v v8, (a2)
-; RV64-NEXT: lb a0, 0(a1)
+; RV64-NEXT: lbu a0, 0(a1)
; RV64-NEXT: addi sp, s0, -80
; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store-asm.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store-asm.ll
index 34002c4015b53..311491fa6018c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store-asm.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store-asm.ll
@@ -845,8 +845,8 @@ define void @strided_load_startval_add_with_splat(ptr noalias nocapture %arg, pt
; CHECK-NEXT: add a1, a1, a4
; CHECK-NEXT: .LBB13_6: # %bb35
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: lb a3, 0(a1)
-; CHECK-NEXT: lb a4, 0(a0)
+; CHECK-NEXT: lbu a3, 0(a1)
+; CHECK-NEXT: lbu a4, 0(a0)
; CHECK-NEXT: add a3, a4, a3
; CHECK-NEXT: sb a3, 0(a0)
; CHECK-NEXT: addiw a2, a2, 1
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll
index ded6df1a77ef9..47add40335931 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll
@@ -343,7 +343,7 @@ define i1 @extractelt_v256i1(ptr %x, i64 %idx) nounwind {
; RV32-NEXT: vmerge.vim v8, v16, 1, v0
; RV32-NEXT: addi a0, sp, 128
; RV32-NEXT: vse8.v v8, (a0)
-; RV32-NEXT: lb a0, 0(a1)
+; RV32-NEXT: lbu a0, 0(a1)
; RV32-NEXT: addi sp, s0, -384
; RV32-NEXT: lw ra, 380(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 376(sp) # 4-byte Folded Reload
@@ -374,7 +374,7 @@ define i1 @extractelt_v256i1(ptr %x, i64 %idx) nounwind {
; RV64-NEXT: vmerge.vim v8, v16, 1, v0
; RV64-NEXT: addi a0, sp, 128
; RV64-NEXT: vse8.v v8, (a0)
-; RV64-NEXT: lb a0, 0(a1)
+; RV64-NEXT: lbu a0, 0(a1)
; RV64-NEXT: addi sp, s0, -384
; RV64-NEXT: ld ra, 376(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 368(sp) # 8-byte Folded Reload
@@ -405,7 +405,7 @@ define i1 @extractelt_v256i1(ptr %x, i64 %idx) nounwind {
; RV32ZBS-NEXT: vmerge.vim v8, v16, 1, v0
; RV32ZBS-NEXT: addi a0, sp, 128
; RV32ZBS-NEXT: vse8.v v8, (a0)
-; RV32ZBS-NEXT: lb a0, 0(a1)
+; RV32ZBS-NEXT: lbu a0, 0(a1)
; RV32ZBS-NEXT: addi sp, s0, -384
; RV32ZBS-NEXT: lw ra, 380(sp) # 4-byte Folded Reload
; RV32ZBS-NEXT: lw s0, 376(sp) # 4-byte Folded Reload
@@ -436,7 +436,7 @@ define i1 @extractelt_v256i1(ptr %x, i64 %idx) nounwind {
; RV64ZBS-NEXT: vmerge.vim v8, v16, 1, v0
; RV64ZBS-NEXT: addi a0, sp, 128
; RV64ZBS-NEXT: vse8.v v8, (a0)
-; RV64ZBS-NEXT: lb a0, 0(a1)
+; RV64ZBS-NEXT: lbu a0, 0(a1)
; RV64ZBS-NEXT: addi sp, s0, -384
; RV64ZBS-NEXT: ld ra, 376(sp) # 8-byte Folded Reload
; RV64ZBS-NEXT: ld s0, 368(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
index b531df25cad7b..5941e662fec27 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
@@ -82,13 +82,13 @@ define <2 x i8> @mgather_v2i8(<2 x ptr> %ptrs, <2 x i1> %m, <2 x i8> %passthru)
; RV64ZVE32F-NEXT: .LBB1_2: # %else2
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB1_3: # %cond.load
-; RV64ZVE32F-NEXT: lb a0, 0(a0)
+; RV64ZVE32F-NEXT: lbu a0, 0(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v8, a0
; RV64ZVE32F-NEXT: andi a2, a2, 2
; RV64ZVE32F-NEXT: beqz a2, .LBB1_2
; RV64ZVE32F-NEXT: .LBB1_4: # %cond.load1
-; RV64ZVE32F-NEXT: lb a0, 0(a1)
+; RV64ZVE32F-NEXT: lbu a0, 0(a1)
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a0
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1
@@ -129,14 +129,14 @@ define <2 x i16> @mgather_v2i8_sextload_v2i16(<2 x ptr> %ptrs, <2 x i1> %m, <2 x
; RV64ZVE32F-NEXT: andi a3, a2, 1
; RV64ZVE32F-NEXT: beqz a3, .LBB2_2
; RV64ZVE32F-NEXT: # %bb.1: # %cond.load
-; RV64ZVE32F-NEXT: lb a0, 0(a0)
+; RV64ZVE32F-NEXT: lbu a0, 0(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v8, a0
; RV64ZVE32F-NEXT: .LBB2_2: # %else
; RV64ZVE32F-NEXT: andi a2, a2, 2
; RV64ZVE32F-NEXT: beqz a2, .LBB2_4
; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1
-; RV64ZVE32F-NEXT: lb a0, 0(a1)
+; RV64ZVE32F-NEXT: lbu a0, 0(a1)
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a0
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1
@@ -182,14 +182,14 @@ define <2 x i16> @mgather_v2i8_zextload_v2i16(<2 x ptr> %ptrs, <2 x i1> %m, <2 x
; RV64ZVE32F-NEXT: andi a3, a2, 1
; RV64ZVE32F-NEXT: beqz a3, .LBB3_2
; RV64ZVE32F-NEXT: # %bb.1: # %cond.load
-; RV64ZVE32F-NEXT: lb a0, 0(a0)
+; RV64ZVE32F-NEXT: lbu a0, 0(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v8, a0
; RV64ZVE32F-NEXT: .LBB3_2: # %else
; RV64ZVE32F-NEXT: andi a2, a2, 2
; RV64ZVE32F-NEXT: beqz a2, .LBB3_4
; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1
-; RV64ZVE32F-NEXT: lb a0, 0(a1)
+; RV64ZVE32F-NEXT: lbu a0, 0(a1)
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a0
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1
@@ -235,14 +235,14 @@ define <2 x i32> @mgather_v2i8_sextload_v2i32(<2 x ptr> %ptrs, <2 x i1> %m, <2 x
; RV64ZVE32F-NEXT: andi a3, a2, 1
; RV64ZVE32F-NEXT: beqz a3, .LBB4_2
; RV64ZVE32F-NEXT: # %bb.1: # %cond.load
-; RV64ZVE32F-NEXT: lb a0, 0(a0)
+; RV64ZVE32F-NEXT: lbu a0, 0(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v8, a0
; RV64ZVE32F-NEXT: .LBB4_2: # %else
; RV64ZVE32F-NEXT: andi a2, a2, 2
; RV64ZVE32F-NEXT: beqz a2, .LBB4_4
; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1
-; RV64ZVE32F-NEXT: lb a0, 0(a1)
+; RV64ZVE32F-NEXT: lbu a0, 0(a1)
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a0
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1
@@ -288,14 +288,14 @@ define <2 x i32> @mgather_v2i8_zextload_v2i32(<2 x ptr> %ptrs, <2 x i1> %m, <2 x
; RV64ZVE32F-NEXT: andi a3, a2, 1
; RV64ZVE32F-NEXT: beqz a3, .LBB5_2
; RV64ZVE32F-NEXT: # %bb.1: # %cond.load
-; RV64ZVE32F-NEXT: lb a0, 0(a0)
+; RV64ZVE32F-NEXT: lbu a0, 0(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v8, a0
; RV64ZVE32F-NEXT: .LBB5_2: # %else
; RV64ZVE32F-NEXT: andi a2, a2, 2
; RV64ZVE32F-NEXT: beqz a2, .LBB5_4
; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1
-; RV64ZVE32F-NEXT: lb a0, 0(a1)
+; RV64ZVE32F-NEXT: lbu a0, 0(a1)
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a0
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1
@@ -349,14 +349,14 @@ define <2 x i64> @mgather_v2i8_sextload_v2i64(<2 x ptr> %ptrs, <2 x i1> %m, <2 x
; RV64ZVE32F-NEXT: andi a3, a2, 1
; RV64ZVE32F-NEXT: beqz a3, .LBB6_2
; RV64ZVE32F-NEXT: # %bb.1: # %cond.load
-; RV64ZVE32F-NEXT: lb a0, 0(a0)
+; RV64ZVE32F-NEXT: lbu a0, 0(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v8, a0
; RV64ZVE32F-NEXT: .LBB6_2: # %else
; RV64ZVE32F-NEXT: andi a2, a2, 2
; RV64ZVE32F-NEXT: beqz a2, .LBB6_4
; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1
-; RV64ZVE32F-NEXT: lb a0, 0(a1)
+; RV64ZVE32F-NEXT: lbu a0, 0(a1)
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a0
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1
@@ -411,14 +411,14 @@ define <2 x i64> @mgather_v2i8_zextload_v2i64(<2 x ptr> %ptrs, <2 x i1> %m, <2 x
; RV64ZVE32F-NEXT: andi a3, a2, 1
; RV64ZVE32F-NEXT: beqz a3, .LBB7_2
; RV64ZVE32F-NEXT: # %bb.1: # %cond.load
-; RV64ZVE32F-NEXT: lb a0, 0(a0)
+; RV64ZVE32F-NEXT: lbu a0, 0(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v8, a0
; RV64ZVE32F-NEXT: .LBB7_2: # %else
; RV64ZVE32F-NEXT: andi a2, a2, 2
; RV64ZVE32F-NEXT: beqz a2, .LBB7_4
; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1
-; RV64ZVE32F-NEXT: lb a0, 0(a1)
+; RV64ZVE32F-NEXT: lbu a0, 0(a1)
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a0
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1
@@ -471,14 +471,14 @@ define <4 x i8> @mgather_v4i8(<4 x ptr> %ptrs, <4 x i1> %m, <4 x i8> %passthru)
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB8_5: # %cond.load
; RV64ZVE32F-NEXT: ld a2, 0(a0)
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf4, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v8, a2
; RV64ZVE32F-NEXT: andi a2, a1, 2
; RV64ZVE32F-NEXT: beqz a2, .LBB8_2
; RV64ZVE32F-NEXT: .LBB8_6: # %cond.load1
; RV64ZVE32F-NEXT: ld a2, 8(a0)
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, tu, ma
@@ -487,7 +487,7 @@ define <4 x i8> @mgather_v4i8(<4 x ptr> %ptrs, <4 x i1> %m, <4 x i8> %passthru)
; RV64ZVE32F-NEXT: beqz a2, .LBB8_3
; RV64ZVE32F-NEXT: .LBB8_7: # %cond.load4
; RV64ZVE32F-NEXT: ld a2, 16(a0)
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vsetivli zero, 3, e8, mf4, tu, ma
@@ -496,7 +496,7 @@ define <4 x i8> @mgather_v4i8(<4 x ptr> %ptrs, <4 x i1> %m, <4 x i8> %passthru)
; RV64ZVE32F-NEXT: beqz a1, .LBB8_4
; RV64ZVE32F-NEXT: .LBB8_8: # %cond.load7
; RV64ZVE32F-NEXT: ld a0, 24(a0)
-; RV64ZVE32F-NEXT: lb a0, 0(a0)
+; RV64ZVE32F-NEXT: lbu a0, 0(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a0
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 3
@@ -539,14 +539,14 @@ define <4 x i8> @mgather_truemask_v4i8(<4 x ptr> %ptrs, <4 x i8> %passthru) {
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB9_5: # %cond.load
; RV64ZVE32F-NEXT: ld a2, 0(a0)
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf4, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v8, a2
; RV64ZVE32F-NEXT: andi a2, a1, 2
; RV64ZVE32F-NEXT: beqz a2, .LBB9_2
; RV64ZVE32F-NEXT: .LBB9_6: # %cond.load1
; RV64ZVE32F-NEXT: ld a2, 8(a0)
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, tu, ma
@@ -555,7 +555,7 @@ define <4 x i8> @mgather_truemask_v4i8(<4 x ptr> %ptrs, <4 x i8> %passthru) {
; RV64ZVE32F-NEXT: beqz a2, .LBB9_3
; RV64ZVE32F-NEXT: .LBB9_7: # %cond.load4
; RV64ZVE32F-NEXT: ld a2, 16(a0)
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vsetivli zero, 3, e8, mf4, tu, ma
@@ -564,7 +564,7 @@ define <4 x i8> @mgather_truemask_v4i8(<4 x ptr> %ptrs, <4 x i8> %passthru) {
; RV64ZVE32F-NEXT: beqz a1, .LBB9_4
; RV64ZVE32F-NEXT: .LBB9_8: # %cond.load7
; RV64ZVE32F-NEXT: ld a0, 24(a0)
-; RV64ZVE32F-NEXT: lb a0, 0(a0)
+; RV64ZVE32F-NEXT: lbu a0, 0(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a0
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 3
@@ -641,14 +641,14 @@ define <8 x i8> @mgather_v8i8(<8 x ptr> %ptrs, <8 x i1> %m, <8 x i8> %passthru)
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB11_9: # %cond.load
; RV64ZVE32F-NEXT: ld a2, 0(a0)
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v8, a2
; RV64ZVE32F-NEXT: andi a2, a1, 2
; RV64ZVE32F-NEXT: beqz a2, .LBB11_2
; RV64ZVE32F-NEXT: .LBB11_10: # %cond.load1
; RV64ZVE32F-NEXT: ld a2, 8(a0)
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf2, tu, ma
@@ -657,7 +657,7 @@ define <8 x i8> @mgather_v8i8(<8 x ptr> %ptrs, <8 x i1> %m, <8 x i8> %passthru)
; RV64ZVE32F-NEXT: beqz a2, .LBB11_3
; RV64ZVE32F-NEXT: .LBB11_11: # %cond.load4
; RV64ZVE32F-NEXT: ld a2, 16(a0)
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vsetivli zero, 3, e8, mf2, tu, ma
@@ -666,7 +666,7 @@ define <8 x i8> @mgather_v8i8(<8 x ptr> %ptrs, <8 x i1> %m, <8 x i8> %passthru)
; RV64ZVE32F-NEXT: beqz a2, .LBB11_4
; RV64ZVE32F-NEXT: .LBB11_12: # %cond.load7
; RV64ZVE32F-NEXT: ld a2, 24(a0)
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, tu, ma
@@ -675,7 +675,7 @@ define <8 x i8> @mgather_v8i8(<8 x ptr> %ptrs, <8 x i1> %m, <8 x i8> %passthru)
; RV64ZVE32F-NEXT: beqz a2, .LBB11_5
; RV64ZVE32F-NEXT: .LBB11_13: # %cond.load10
; RV64ZVE32F-NEXT: ld a2, 32(a0)
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vsetivli zero, 5, e8, mf2, tu, ma
@@ -684,7 +684,7 @@ define <8 x i8> @mgather_v8i8(<8 x ptr> %ptrs, <8 x i1> %m, <8 x i8> %passthru)
; RV64ZVE32F-NEXT: beqz a2, .LBB11_6
; RV64ZVE32F-NEXT: .LBB11_14: # %cond.load13
; RV64ZVE32F-NEXT: ld a2, 40(a0)
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vsetivli zero, 6, e8, mf2, tu, ma
@@ -693,7 +693,7 @@ define <8 x i8> @mgather_v8i8(<8 x ptr> %ptrs, <8 x i1> %m, <8 x i8> %passthru)
; RV64ZVE32F-NEXT: beqz a2, .LBB11_7
; RV64ZVE32F-NEXT: .LBB11_15: # %cond.load16
; RV64ZVE32F-NEXT: ld a2, 48(a0)
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vsetivli zero, 7, e8, mf2, tu, ma
@@ -702,7 +702,7 @@ define <8 x i8> @mgather_v8i8(<8 x ptr> %ptrs, <8 x i1> %m, <8 x i8> %passthru)
; RV64ZVE32F-NEXT: beqz a1, .LBB11_8
; RV64ZVE32F-NEXT: .LBB11_16: # %cond.load19
; RV64ZVE32F-NEXT: ld a0, 56(a0)
-; RV64ZVE32F-NEXT: lb a0, 0(a0)
+; RV64ZVE32F-NEXT: lbu a0, 0(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a0
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 7
@@ -739,7 +739,7 @@ define <8 x i8> @mgather_baseidx_v8i8(ptr %base, <8 x i8> %idxs, <8 x i1> %m, <8
; RV64ZVE32F-NEXT: # %bb.1: # %cond.load
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: .LBB12_2: # %else
@@ -750,7 +750,7 @@ define <8 x i8> @mgather_baseidx_v8i8(ptr %base, <8 x i8> %idxs, <8 x i1> %m, <8
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vmv.s.x v10, a2
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 1
@@ -762,7 +762,7 @@ define <8 x i8> @mgather_baseidx_v8i8(ptr %base, <8 x i8> %idxs, <8 x i1> %m, <8
; RV64ZVE32F-NEXT: # %bb.5: # %cond.load4
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vmv.s.x v11, a2
; RV64ZVE32F-NEXT: vsetivli zero, 3, e8, mf2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v11, 2
@@ -782,7 +782,7 @@ define <8 x i8> @mgather_baseidx_v8i8(ptr %base, <8 x i8> %idxs, <8 x i1> %m, <8
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vmv.s.x v10, a2
; RV64ZVE32F-NEXT: vsetivli zero, 6, e8, mf2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 5
@@ -802,7 +802,7 @@ define <8 x i8> @mgather_baseidx_v8i8(ptr %base, <8 x i8> %idxs, <8 x i1> %m, <8
; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vmv.s.x v10, a2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 3
@@ -812,7 +812,7 @@ define <8 x i8> @mgather_baseidx_v8i8(ptr %base, <8 x i8> %idxs, <8 x i1> %m, <8
; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v10, a2
; RV64ZVE32F-NEXT: vsetivli zero, 5, e8, mf2, tu, ma
@@ -823,7 +823,7 @@ define <8 x i8> @mgather_baseidx_v8i8(ptr %base, <8 x i8> %idxs, <8 x i1> %m, <8
; RV64ZVE32F-NEXT: .LBB12_15: # %cond.load16
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vmv.s.x v10, a2
; RV64ZVE32F-NEXT: vsetivli zero, 7, e8, mf2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 6
@@ -834,7 +834,7 @@ define <8 x i8> @mgather_baseidx_v8i8(ptr %base, <8 x i8> %idxs, <8 x i1> %m, <8
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a1, v8
; RV64ZVE32F-NEXT: add a0, a0, a1
-; RV64ZVE32F-NEXT: lb a0, 0(a0)
+; RV64ZVE32F-NEXT: lbu a0, 0(a0)
; RV64ZVE32F-NEXT: vmv.s.x v8, a0
; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7
@@ -12329,7 +12329,7 @@ define <16 x i8> @mgather_baseidx_v16i8(ptr %base, <16 x i8> %idxs, <16 x i1> %m
; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vsetivli zero, 16, e8, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: .LBB97_2: # %else
@@ -12340,7 +12340,7 @@ define <16 x i8> @mgather_baseidx_v16i8(ptr %base, <16 x i8> %idxs, <16 x i1> %m
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vmv.s.x v10, a2
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, m1, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 1
@@ -12352,7 +12352,7 @@ define <16 x i8> @mgather_baseidx_v16i8(ptr %base, <16 x i8> %idxs, <16 x i1> %m
; RV64ZVE32F-NEXT: # %bb.5: # %cond.load4
; RV64ZVE32F-NEXT: vmv.x.s a2, v11
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vmv.s.x v10, a2
; RV64ZVE32F-NEXT: vsetivli zero, 3, e8, m1, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 2
@@ -12372,7 +12372,7 @@ define <16 x i8> @mgather_baseidx_v16i8(ptr %base, <16 x i8> %idxs, <16 x i1> %m
; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v11
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vmv.s.x v11, a2
; RV64ZVE32F-NEXT: vsetivli zero, 6, e8, m1, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v11, 5
@@ -12397,7 +12397,7 @@ define <16 x i8> @mgather_baseidx_v16i8(ptr %base, <16 x i8> %idxs, <16 x i1> %m
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vmv.s.x v10, a2
; RV64ZVE32F-NEXT: vsetivli zero, 10, e8, m1, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 9
@@ -12409,7 +12409,7 @@ define <16 x i8> @mgather_baseidx_v16i8(ptr %base, <16 x i8> %idxs, <16 x i1> %m
; RV64ZVE32F-NEXT: # %bb.16: # %cond.load28
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vmv.s.x v11, a2
; RV64ZVE32F-NEXT: vsetivli zero, 11, e8, m1, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v11, 10
@@ -12429,7 +12429,7 @@ define <16 x i8> @mgather_baseidx_v16i8(ptr %base, <16 x i8> %idxs, <16 x i1> %m
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vmv.s.x v10, a2
; RV64ZVE32F-NEXT: vsetivli zero, 14, e8, m1, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 13
@@ -12441,7 +12441,7 @@ define <16 x i8> @mgather_baseidx_v16i8(ptr %base, <16 x i8> %idxs, <16 x i1> %m
; RV64ZVE32F-NEXT: # %bb.22: # %cond.load40
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vmv.s.x v10, a2
; RV64ZVE32F-NEXT: vsetivli zero, 15, e8, m1, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 14
@@ -12454,7 +12454,7 @@ define <16 x i8> @mgather_baseidx_v16i8(ptr %base, <16 x i8> %idxs, <16 x i1> %m
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a1, v8
; RV64ZVE32F-NEXT: add a0, a0, a1
-; RV64ZVE32F-NEXT: lb a0, 0(a0)
+; RV64ZVE32F-NEXT: lbu a0, 0(a0)
; RV64ZVE32F-NEXT: vmv.s.x v8, a0
; RV64ZVE32F-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 15
@@ -12466,7 +12466,7 @@ define <16 x i8> @mgather_baseidx_v16i8(ptr %base, <16 x i8> %idxs, <16 x i1> %m
; RV64ZVE32F-NEXT: vslidedown.vi v11, v11, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v11
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vmv.s.x v11, a2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, m1, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v11, 3
@@ -12476,7 +12476,7 @@ define <16 x i8> @mgather_baseidx_v16i8(ptr %base, <16 x i8> %idxs, <16 x i1> %m
; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v11, a2
; RV64ZVE32F-NEXT: vsetivli zero, 5, e8, m1, tu, ma
@@ -12487,7 +12487,7 @@ define <16 x i8> @mgather_baseidx_v16i8(ptr %base, <16 x i8> %idxs, <16 x i1> %m
; RV64ZVE32F-NEXT: .LBB97_28: # %cond.load16
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vmv.s.x v11, a2
; RV64ZVE32F-NEXT: vsetivli zero, 7, e8, m1, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v11, 6
@@ -12498,7 +12498,7 @@ define <16 x i8> @mgather_baseidx_v16i8(ptr %base, <16 x i8> %idxs, <16 x i1> %m
; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vmv.s.x v10, a2
; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, m1, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 7
@@ -12508,7 +12508,7 @@ define <16 x i8> @mgather_baseidx_v16i8(ptr %base, <16 x i8> %idxs, <16 x i1> %m
; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v10, a2
; RV64ZVE32F-NEXT: vsetivli zero, 9, e8, m1, tu, ma
@@ -12521,7 +12521,7 @@ define <16 x i8> @mgather_baseidx_v16i8(ptr %base, <16 x i8> %idxs, <16 x i1> %m
; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vmv.s.x v10, a2
; RV64ZVE32F-NEXT: vsetivli zero, 12, e8, m1, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 11
@@ -12531,7 +12531,7 @@ define <16 x i8> @mgather_baseidx_v16i8(ptr %base, <16 x i8> %idxs, <16 x i1> %m
; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v10, a2
; RV64ZVE32F-NEXT: vsetivli zero, 13, e8, m1, tu, ma
@@ -12589,7 +12589,7 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v10, a2
@@ -12601,7 +12601,7 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v12
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
@@ -12615,7 +12615,7 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: # %bb.5: # %cond.load4
; RV64ZVE32F-NEXT: vmv.x.s a2, v12
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v14, a2
@@ -12637,7 +12637,7 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vslidedown.vi v12, v13, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v12
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v14, a2
@@ -12664,7 +12664,7 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vslidedown.vi v13, v12, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v13
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v14, a2
@@ -12678,7 +12678,7 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: # %bb.16: # %cond.load28
; RV64ZVE32F-NEXT: vmv.x.s a2, v13
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v14, a2
@@ -12694,7 +12694,7 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vslidedown.vi v13, v13, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v13
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v14, a2
@@ -12708,7 +12708,7 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: # %bb.20: # %cond.load34
; RV64ZVE32F-NEXT: vmv.x.s a2, v12
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v14, a2
@@ -12722,7 +12722,7 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vslidedown.vi v9, v12, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v14, a2
@@ -12747,7 +12747,7 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
@@ -12761,7 +12761,7 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: # %bb.29: # %cond.load52
; RV64ZVE32F-NEXT: vmv.x.s a2, v12
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v14, a2
@@ -12783,7 +12783,7 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vslidedown.vi v12, v9, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v12
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
@@ -12810,7 +12810,7 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
@@ -12824,7 +12824,7 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: # %bb.40: # %cond.load76
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
@@ -12846,7 +12846,7 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
@@ -12860,7 +12860,7 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: # %bb.46: # %cond.load88
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
@@ -12875,7 +12875,7 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a1, v8
; RV64ZVE32F-NEXT: add a0, a0, a1
-; RV64ZVE32F-NEXT: lb a0, 0(a0)
+; RV64ZVE32F-NEXT: lbu a0, 0(a0)
; RV64ZVE32F-NEXT: li a1, 32
; RV64ZVE32F-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v8, a0
@@ -12889,7 +12889,7 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vslidedown.vi v12, v12, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v12
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v14, a2
@@ -12901,7 +12901,7 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v13
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v14, a2
@@ -12913,7 +12913,7 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: .LBB98_52: # %cond.load16
; RV64ZVE32F-NEXT: vmv.x.s a2, v13
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v14, a2
@@ -12926,7 +12926,7 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vslidedown.vi v13, v13, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v13
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v14, a2
@@ -12938,7 +12938,7 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v12
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v14, a2
@@ -12950,7 +12950,7 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: .LBB98_55: # %cond.load40
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
@@ -12963,7 +12963,7 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
@@ -12975,7 +12975,7 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
@@ -12989,7 +12989,7 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vslidedown.vi v12, v12, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v12
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
@@ -13001,7 +13001,7 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
@@ -13013,7 +13013,7 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: .LBB98_60: # %cond.load64
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
@@ -13026,7 +13026,7 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
@@ -13038,7 +13038,7 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
@@ -13052,7 +13052,7 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
@@ -13064,7 +13064,7 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
index 90b2dd9f03830..863544e5273ad 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
@@ -62,7 +62,7 @@ define <2 x i16> @mgather_v2i16_align1(<2 x ptr> %ptrs, <2 x i1> %m, <2 x i16> %
; RV32-NEXT: .LBB4_3: # %cond.load
; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; RV32-NEXT: vmv.x.s a1, v8
-; RV32-NEXT: lb a2, 1(a1)
+; RV32-NEXT: lbu a2, 1(a1)
; RV32-NEXT: lbu a1, 0(a1)
; RV32-NEXT: slli a2, a2, 8
; RV32-NEXT: or a1, a2, a1
@@ -74,7 +74,7 @@ define <2 x i16> @mgather_v2i16_align1(<2 x ptr> %ptrs, <2 x i1> %m, <2 x i16> %
; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 1
; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: lb a1, 1(a0)
+; RV32-NEXT: lbu a1, 1(a0)
; RV32-NEXT: lbu a0, 0(a0)
; RV32-NEXT: slli a1, a1, 8
; RV32-NEXT: or a0, a1, a0
@@ -99,7 +99,7 @@ define <2 x i16> @mgather_v2i16_align1(<2 x ptr> %ptrs, <2 x i1> %m, <2 x i16> %
; RV64-NEXT: .LBB4_3: # %cond.load
; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; RV64-NEXT: vmv.x.s a1, v8
-; RV64-NEXT: lb a2, 1(a1)
+; RV64-NEXT: lbu a2, 1(a1)
; RV64-NEXT: lbu a1, 0(a1)
; RV64-NEXT: slli a2, a2, 8
; RV64-NEXT: or a1, a2, a1
@@ -111,7 +111,7 @@ define <2 x i16> @mgather_v2i16_align1(<2 x ptr> %ptrs, <2 x i1> %m, <2 x i16> %
; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV64-NEXT: vslidedown.vi v8, v8, 1
; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: lb a1, 1(a0)
+; RV64-NEXT: lbu a1, 1(a0)
; RV64-NEXT: lbu a0, 0(a0)
; RV64-NEXT: slli a1, a1, 8
; RV64-NEXT: or a0, a1, a0
diff --git a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
index 91efe2c223a07..fc7f1f588369f 100644
--- a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
+++ b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
@@ -310,7 +310,7 @@ define void @test_srem_vec(ptr %X) nounwind {
; RV32-NEXT: sw s5, 4(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s6, 0(sp) # 4-byte Folded Spill
; RV32-NEXT: mv s0, a0
-; RV32-NEXT: lb a0, 12(a0)
+; RV32-NEXT: lbu a0, 12(a0)
; RV32-NEXT: lw a1, 8(s0)
; RV32-NEXT: slli a2, a0, 30
; RV32-NEXT: lw a3, 4(s0)
@@ -389,7 +389,7 @@ define void @test_srem_vec(ptr %X) nounwind {
; RV64-NEXT: sd s2, 16(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s3, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: mv s0, a0
-; RV64-NEXT: lb a0, 12(a0)
+; RV64-NEXT: lbu a0, 12(a0)
; RV64-NEXT: lwu a1, 8(s0)
; RV64-NEXT: slli a0, a0, 32
; RV64-NEXT: ld a2, 0(s0)
@@ -460,7 +460,7 @@ define void @test_srem_vec(ptr %X) nounwind {
; RV32M-NEXT: sw s5, 4(sp) # 4-byte Folded Spill
; RV32M-NEXT: sw s6, 0(sp) # 4-byte Folded Spill
; RV32M-NEXT: mv s0, a0
-; RV32M-NEXT: lb a0, 12(a0)
+; RV32M-NEXT: lbu a0, 12(a0)
; RV32M-NEXT: lw a1, 8(s0)
; RV32M-NEXT: slli a2, a0, 30
; RV32M-NEXT: lw a3, 4(s0)
@@ -535,7 +535,7 @@ define void @test_srem_vec(ptr %X) nounwind {
; RV64M-NEXT: ld a1, 0(a0)
; RV64M-NEXT: lwu a2, 8(a0)
; RV64M-NEXT: srli a3, a1, 2
-; RV64M-NEXT: lb a4, 12(a0)
+; RV64M-NEXT: lbu a4, 12(a0)
; RV64M-NEXT: slli a5, a2, 62
; RV64M-NEXT: or a3, a5, a3
; RV64M-NEXT: srai a3, a3, 31
@@ -610,7 +610,7 @@ define void @test_srem_vec(ptr %X) nounwind {
; RV32MV-NEXT: mv s2, a0
; RV32MV-NEXT: lw a0, 8(a0)
; RV32MV-NEXT: lw a1, 4(s2)
-; RV32MV-NEXT: lb a2, 12(s2)
+; RV32MV-NEXT: lbu a2, 12(s2)
; RV32MV-NEXT: slli a3, a0, 31
; RV32MV-NEXT: srli a4, a1, 1
; RV32MV-NEXT: or s3, a4, a3
@@ -712,7 +712,7 @@ define void @test_srem_vec(ptr %X) nounwind {
; RV64MV-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
; RV64MV-NEXT: addi s0, sp, 64
; RV64MV-NEXT: andi sp, sp, -32
-; RV64MV-NEXT: lb a1, 12(a0)
+; RV64MV-NEXT: lbu a1, 12(a0)
; RV64MV-NEXT: lwu a2, 8(a0)
; RV64MV-NEXT: slli a1, a1, 32
; RV64MV-NEXT: ld a3, 0(a0)
diff --git a/llvm/test/CodeGen/RISCV/unaligned-load-store.ll b/llvm/test/CodeGen/RISCV/unaligned-load-store.ll
index d46e6c680aeff..083a06c0b3558 100644
--- a/llvm/test/CodeGen/RISCV/unaligned-load-store.ll
+++ b/llvm/test/CodeGen/RISCV/unaligned-load-store.ll
@@ -13,7 +13,7 @@
define i8 @load_i8(ptr %p) {
; ALL-LABEL: load_i8:
; ALL: # %bb.0:
-; ALL-NEXT: lb a0, 0(a0)
+; ALL-NEXT: lbu a0, 0(a0)
; ALL-NEXT: ret
%res = load i8, ptr %p, align 1
ret i8 %res
@@ -22,7 +22,7 @@ define i8 @load_i8(ptr %p) {
define i16 @load_i16(ptr %p) {
; NOMISALIGN-LABEL: load_i16:
; NOMISALIGN: # %bb.0:
-; NOMISALIGN-NEXT: lb a1, 1(a0)
+; NOMISALIGN-NEXT: lbu a1, 1(a0)
; NOMISALIGN-NEXT: lbu a0, 0(a0)
; NOMISALIGN-NEXT: slli a1, a1, 8
; NOMISALIGN-NEXT: or a0, a1, a0
@@ -41,7 +41,7 @@ define i24 @load_i24(ptr %p) {
; NOMISALIGN: # %bb.0:
; NOMISALIGN-NEXT: lbu a1, 1(a0)
; NOMISALIGN-NEXT: lbu a2, 0(a0)
-; NOMISALIGN-NEXT: lb a0, 2(a0)
+; NOMISALIGN-NEXT: lbu a0, 2(a0)
; NOMISALIGN-NEXT: slli a1, a1, 8
; NOMISALIGN-NEXT: or a1, a1, a2
; NOMISALIGN-NEXT: slli a0, a0, 16
@@ -50,7 +50,7 @@ define i24 @load_i24(ptr %p) {
;
; MISALIGN-LABEL: load_i24:
; MISALIGN: # %bb.0:
-; MISALIGN-NEXT: lb a1, 2(a0)
+; MISALIGN-NEXT: lbu a1, 2(a0)
; MISALIGN-NEXT: lhu a0, 0(a0)
; MISALIGN-NEXT: slli a1, a1, 16
; MISALIGN-NEXT: or a0, a0, a1
@@ -60,33 +60,19 @@ define i24 @load_i24(ptr %p) {
}
define i32 @load_i32(ptr %p) {
-; RV32I-LABEL: load_i32:
-; RV32I: # %bb.0:
-; RV32I-NEXT: lbu a1, 1(a0)
-; RV32I-NEXT: lbu a2, 0(a0)
-; RV32I-NEXT: lbu a3, 2(a0)
-; RV32I-NEXT: lbu a0, 3(a0)
-; RV32I-NEXT: slli a1, a1, 8
-; RV32I-NEXT: or a1, a1, a2
-; RV32I-NEXT: slli a3, a3, 16
-; RV32I-NEXT: slli a0, a0, 24
-; RV32I-NEXT: or a0, a0, a3
-; RV32I-NEXT: or a0, a0, a1
-; RV32I-NEXT: ret
-;
-; RV64I-LABEL: load_i32:
-; RV64I: # %bb.0:
-; RV64I-NEXT: lbu a1, 1(a0)
-; RV64I-NEXT: lbu a2, 0(a0)
-; RV64I-NEXT: lbu a3, 2(a0)
-; RV64I-NEXT: lb a0, 3(a0)
-; RV64I-NEXT: slli a1, a1, 8
-; RV64I-NEXT: or a1, a1, a2
-; RV64I-NEXT: slli a3, a3, 16
-; RV64I-NEXT: slli a0, a0, 24
-; RV64I-NEXT: or a0, a0, a3
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: ret
+; NOMISALIGN-LABEL: load_i32:
+; NOMISALIGN: # %bb.0:
+; NOMISALIGN-NEXT: lbu a1, 1(a0)
+; NOMISALIGN-NEXT: lbu a2, 0(a0)
+; NOMISALIGN-NEXT: lbu a3, 2(a0)
+; NOMISALIGN-NEXT: lbu a0, 3(a0)
+; NOMISALIGN-NEXT: slli a1, a1, 8
+; NOMISALIGN-NEXT: or a1, a1, a2
+; NOMISALIGN-NEXT: slli a3, a3, 16
+; NOMISALIGN-NEXT: slli a0, a0, 24
+; NOMISALIGN-NEXT: or a0, a0, a3
+; NOMISALIGN-NEXT: or a0, a0, a1
+; NOMISALIGN-NEXT: ret
;
; MISALIGN-LABEL: load_i32:
; MISALIGN: # %bb.0:
diff --git a/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
index 36d064ea3d505..c3cb8abd82019 100644
--- a/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
+++ b/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
@@ -330,7 +330,7 @@ define void @test_urem_vec(ptr %X) nounwind {
; RV32-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: mv s0, a0
-; RV32-NEXT: lb a0, 4(a0)
+; RV32-NEXT: lbu a0, 4(a0)
; RV32-NEXT: lw a1, 0(s0)
; RV32-NEXT: slli a0, a0, 10
; RV32-NEXT: srli s1, a1, 22
@@ -437,7 +437,7 @@ define void @test_urem_vec(ptr %X) nounwind {
;
; RV32M-LABEL: test_urem_vec:
; RV32M: # %bb.0:
-; RV32M-NEXT: lb a1, 4(a0)
+; RV32M-NEXT: lbu a1, 4(a0)
; RV32M-NEXT: lw a2, 0(a0)
; RV32M-NEXT: slli a1, a1, 10
; RV32M-NEXT: srli a3, a2, 22
@@ -528,7 +528,7 @@ define void @test_urem_vec(ptr %X) nounwind {
; RV32MV-NEXT: slli a2, a1, 10
; RV32MV-NEXT: srli a2, a2, 21
; RV32MV-NEXT: sh a2, 10(sp)
-; RV32MV-NEXT: lb a2, 4(a0)
+; RV32MV-NEXT: lbu a2, 4(a0)
; RV32MV-NEXT: slli a2, a2, 10
; RV32MV-NEXT: srli a1, a1, 22
; RV32MV-NEXT: or a1, a1, a2
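
As a quick illustration of the test churn above (a hedged sketch, not part of the patch; the file and function names are made up), the IR below mirrors the load_i8 case in unaligned-load-store.ll: a byte load whose result is only consumed as an i8, so either a sign- or zero-extending load is legal, and instruction selection now picks lbu where it previously picked lb.

; extload-byte.ll -- illustrative sketch only
define i8 @load_byte(ptr %p) {
  ; The loaded value is only used as an i8, so the extension kind of the
  ; load is unconstrained; after this patch it is selected as LBU, not LB.
  %v = load i8, ptr %p, align 1
  ret i8 %v
}
; Expected RV64 output after this change, matching the updated load_i8 test:
;   lbu a0, 0(a0)
;   ret

IR like this can be fed to llc -mtriple=riscv64 (or -mtriple=riscv32) on either side of this revision to reproduce the lb/lbu difference shown in the hunks above.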